diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 636c70e40e4..34cc5186a2e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,51 +1 @@
-bootstrap.sh @deepthi
-/config/mycnf/ @deepthi @shlomi-noach @mattlord
-/docker/ @derekperkins @dkhenry @mattlord
-/examples/compose @shlomi-noach
-/examples/demo @sougou @mattlord
-/examples/legacy_local @deepthi
-/examples/local @rohit-nayak-ps @frouioui @mattlord
-/examples/operator @GuptaManan100 @frouioui
-/examples/region_sharding @deepthi
-/java/ @harshit-gangal
-/go/cache @vmg
-/go/cmd @ajm188
-/go/cmd/vtadmin @ajm188 @doeg @notfelineit
-/go/cmd/vtctldclient @ajm188 @notfelineit
-/go/internal/flag @ajm188
-/go/mysql @harshit-gangal @systay @mattlord
-/go/protoutil @ajm188
-/go/test/endtoend/onlineddl @shlomi-noach
-/go/test/endtoend/vtorc @deepthi @shlomi-noach @GuptaManan100
-/go/test/endtoend/messaging @mattlord @rohit-nayak-ps @derekperkins
-/go/test/endtoend/vtgate @harshit-gangal @systay @frouioui
-/go/vt/discovery @deepthi
-/go/vt/mysqlctl @deepthi @mattlord
-/go/vt/vtorc @deepthi @shlomi-noach @GuptaManan100
-/go/vt/proto/vtadmin @ajm188 @doeg @notfelineit
-/go/vt/schema @shlomi-noach
-/go/vt/servenv @deepthi @ajm188
-/go/vt/sqlparser @harshit-gangal @systay @GuptaManan100
-/go/vt/srvtopo @rafael
-/go/vt/topo @deepthi @rafael
-/go/vt/vtadmin @ajm188 @doeg @notfelineit @rohit-nayak-ps
-/go/vt/vtctl @deepthi
-/go/vt/vtctl/vtctl.go @ajm188 @notfelineit
-/go/vt/vtctl/grpcvtctldclient @ajm188 @notfelineit
-/go/vt/vtctl/grpcvtctldserver @ajm188 @notfelineit
-/go/vt/vtctl/vtctldclient @ajm188 @notfelineit
-/go/vt/vtctld @ajm188 @deepthi @notfelineit @rohit-nayak-ps
-/go/vt/vtexplain @systay
-/go/vt/vtgate @harshit-gangal @systay @frouioui
-/go/vt/vttablet/tabletmanager @deepthi @shlomi-noach
-/go/vt/vttablet/tabletmanager/vreplication @rohit-nayak-ps @mattlord
-/go/vt/vttablet/tabletmanager/vstreamer @rohit-nayak-ps @mattlord
-/go/vt/vttablet/tabletserver @harshit-gangal @systay @shlomi-noach
-/go/vt/vttablet/tabletserver/messager @mattlord @rohit-nayak-ps @derekperkins
-/go/vt/wrangler @deepthi @rohit-nayak-ps @mattlord
-/go/vt/workflow @rohit-nayak-ps @mattlord
-/proto/vtadmin.proto @ajm188 @doeg @notfelineit
-/proto/vtctldata.proto @ajm188 @notfelineit
-/proto/vtctlservice.proto @ajm188 @notfelineit
-/web/vtadmin @ajm188 @doeg @notfelineit
-/web/vtctld2 @notfelineit @rohit-nayak-ps
+* @slackhq/vitess-approvers
diff --git a/.github/docker/cluster_test_vtorc/Dockerfile b/.github/docker/cluster_test_vtorc/Dockerfile
deleted file mode 100644
index 8042d96eb1a..00000000000
--- a/.github/docker/cluster_test_vtorc/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
-
-FROM "${image}"
-
-USER root
-
-# Re-copy sources from working tree
-RUN rm -rf /vt/src/vitess.io/vitess/*
-COPY . /vt/src/vitess.io/vitess
-
-# Set the working directory
-WORKDIR /vt/src/vitess.io/vitess
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-
-USER vitess
-
-# Set environment variables
-ENV VTROOT /vt/src/vitess.io/vitess
-# Set the vtdataroot such that it uses the volume mount
-ENV VTDATAROOT /vt/vtdataroot
-
-# create the vtdataroot directory
-RUN mkdir -p $VTDATAROOT
-
-# install goimports
-RUN go install golang.org/x/tools/cmd/goimports@latest
-
-# sleep for 50 minutes
-CMD sleep 3000
diff --git a/.github/docker/cluster_test_vtorc_mysql57/Dockerfile b/.github/docker/cluster_test_vtorc_mysql57/Dockerfile
deleted file mode 100644
index 0497e4112d9..00000000000
--- a/.github/docker/cluster_test_vtorc_mysql57/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
-
-FROM "${image}"
-
-USER root
-
-# Re-copy sources from working tree
-RUN rm -rf /vt/src/vitess.io/vitess/*
-COPY . /vt/src/vitess.io/vitess
-
-# Set the working directory
-WORKDIR /vt/src/vitess.io/vitess
-
-# Fix permissions
-RUN chown -R vitess:vitess /vt
-
-USER vitess
-
-# Set environment variables
-ENV VTROOT /vt/src/vitess.io/vitess
-# Set the vtdataroot such that it uses the volume mount
-ENV VTDATAROOT /vt/vtdataroot
-
-# create the vtdataroot directory
-RUN mkdir -p $VTDATAROOT
-
-# install goimports
-RUN go install golang.org/x/tools/cmd/goimports@latest
-
-# sleep for 50 minutes
-CMD sleep 3000
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 8acc1f57f72..d32f01a6668 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -18,7 +18,7 @@
## Checklist
-- [ ] "Backport me!" label has been added if this change should be backported
+- [ ] "Backport to:" labels have been added if this change should be back-ported
- [ ] Tests were added or are not required
- [ ] Documentation was added or is not required
diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml
new file mode 100644
index 00000000000..f033c0c10e0
--- /dev/null
+++ b/.github/workflows/assign_milestone.yml
@@ -0,0 +1,30 @@
+name: Assign Milestone
+
+on:
+ pull_request_target:
+ types: [opened]
+
+permissions: read-all
+
+env:
+ GH_TOKEN: ${{ github.token }}
+
+jobs:
+ build:
+ name: Assign Milestone
+ runs-on: ubuntu-22.04
+ permissions:
+ pull-requests: write
+
+ steps:
+ - name: Set up Go
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.21.10
+
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Assign Milestone
+ run: |
+ gh pr edit ${{ github.event.number }} --milestone "v$(sed -n 's/.*versionName.*\"\([[:digit:]\.]*\).*\"/\1/p' ./go/vt/servenv/version.go)"
diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml
index aebd68686bc..ba28250c63b 100644
--- a/.github/workflows/check_make_vtadmin_authz_testgen.yml
+++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml
@@ -5,20 +5,20 @@ jobs:
build:
name: Check Make vtadmin_authz_testgen
runs-on: ubuntu-latest
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -31,16 +31,20 @@ jobs:
- 'bootstrap.sh'
- 'tools/**'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'Makefile'
- 'go/vt/vtadmin/**'
- '.github/workflows/check_make_vtadmin_authz_testgen.yml'
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml
index 970ce9190c0..13a26019aba 100644
--- a/.github/workflows/check_make_vtadmin_web_proto.yml
+++ b/.github/workflows/check_make_vtadmin_web_proto.yml
@@ -10,15 +10,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -31,7 +28,8 @@ jobs:
- 'bootstrap.sh'
- 'tools/**'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'Makefile'
- 'go/vt/proto/**'
- 'proto/*.proto'
@@ -39,10 +37,10 @@ jobs:
- '.github/workflows/check_make_vtadmin_web_proto.yml'
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
with:
- go-version: 1.18.5
+ go-version: 1.21.10
- name: Setup Node
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml
index c44c34d2d07..e6d2ec128e2 100644
--- a/.github/workflows/cluster_endtoend_12.yml
+++ b/.github/workflows/cluster_endtoend_12.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (12)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (12)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 12 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard 12
diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml
index 97241444e19..36d5d19b2aa 100644
--- a/.github/workflows/cluster_endtoend_13.yml
+++ b/.github/workflows/cluster_endtoend_13.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (13)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (13)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 13 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard 13
diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml
index 3d8e22ea659..aa918d61374 100644
--- a/.github/workflows/cluster_endtoend_15.yml
+++ b/.github/workflows/cluster_endtoend_15.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (15)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (15)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 15 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard 15
diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml
index 9913dbd59a2..f06a4a9a0e7 100644
--- a/.github/workflows/cluster_endtoend_18.yml
+++ b/.github/workflows/cluster_endtoend_18.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (18)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (18)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -95,37 +96,20 @@ jobs:
run: |
make tools
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 18 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard 18
diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml
index cfa90240080..50122367128 100644
--- a/.github/workflows/cluster_endtoend_21.yml
+++ b/.github/workflows/cluster_endtoend_21.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (21)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (21)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 21 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+            # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard 21
diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml
index add0cbc0a54..f94559d1f44 100644
--- a/.github/workflows/cluster_endtoend_22.yml
+++ b/.github/workflows/cluster_endtoend_22.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (22)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (22)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 22 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard 22
diff --git a/.github/workflows/cluster_endtoend_26.yml b/.github/workflows/cluster_endtoend_26.yml
deleted file mode 100644
index ddaab899bfb..00000000000
--- a/.github/workflows/cluster_endtoend_26.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (26)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (26)')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (26)
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_26.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
- sudo apt-get update
-
- # Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
- sudo service mysql stop
- sudo service etcd stop
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
- go mod download
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard 26 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
index 17ac8cac8c8..da7acb5c0bd 100644
--- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
+++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (ers_prs_newfeatures_heavy)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (ers_prs_newfeatures_heavy)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard ers_prs_newfeatures_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+            # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard ers_prs_newfeatures_heavy
diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml
index 2d92a6d03d3..69d08e7ac80 100644
--- a/.github/workflows/cluster_endtoend_mysql80.yml
+++ b/.github/workflows/cluster_endtoend_mysql80.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (mysql80)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (mysql80)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard mysql80 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+            # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard mysql80
diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
index a8fef50ea6f..289d6001d09 100644
--- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml
+++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (mysql_server_vault)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (mysql_server_vault)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -95,37 +96,20 @@ jobs:
run: |
make tools
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard mysql_server_vault | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+            # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard mysql_server_vault
diff --git a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml
index 94855bfb3a1..b7ab2f85680 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_declarative)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_declarative)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+    - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_declarative | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_declarative
diff --git a/.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml
deleted file mode 100644
index 3c7fca00d06..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_declarative) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_declarative) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_declarative) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_declarative_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_declarative | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
index 269db6e959d..24a1c788ea4 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_ghost)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+    - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
deleted file mode 100644
index c10dab28517..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_ghost) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
index ca3329096d6..912d8f0c37f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revert)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_revert)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
deleted file mode 100644
index 0835553d39a..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_revert) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revert) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_revert) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revertible.yml b/.github/workflows/cluster_endtoend_onlineddl_revertible.yml
index a750825d3e9..cc6f210da51 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revertible.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revertible.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revertible)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_revertible)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revertible | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revertible
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml
deleted file mode 100644
index cfce8ad2eab..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_revertible) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revertible) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_revertible) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_revertible_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revertible | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
index b51711df734..6a8411adbcc 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_scheduler)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
deleted file mode 100644
index dec20500256..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_scheduler) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_singleton.yml b/.github/workflows/cluster_endtoend_onlineddl_singleton.yml
index 054fc039cdf..af4d0e12b2c 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_singleton.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_singleton.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_singleton)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_singleton)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_singleton | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_singleton
diff --git a/.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml
deleted file mode 100644
index 40ae6050d35..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_singleton) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_singleton) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_singleton) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_singleton_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_singleton | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
index 1ad7306f34b..e4dd20df5ba 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B7B3B788A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
deleted file mode 100644
index b82d19217d7..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_vrepl) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_vrepl) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
index ffe0cf531b1..74e7e3f0377 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B7B3B788A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
deleted file mode 100644
index 0e38ecf0e70..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_vrepl_stress) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
index 9d684ec9ea7..0cc07824564 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress_suite)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B7B3B788A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
deleted file mode 100644
index 81c586269a0..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_vrepl_stress_suite) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress_suite) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
index 74f40632d58..d92cbd68114 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_suite)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_suite)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
deleted file mode 100644
index 6e358a6261f..00000000000
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (onlineddl_vrepl_suite) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_suite) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
index 22ef9c7932e..3993523b69c 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (schemadiff_vrepl)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (schemadiff_vrepl)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
deleted file mode 100644
index b33417798d6..00000000000
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (schemadiff_vrepl) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (schemadiff_vrepl) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (schemadiff_vrepl) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
index 47bce7c735f..13b7c895ad7 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_consul)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_consul)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -95,37 +96,20 @@ jobs:
run: |
make tools
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_consul
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
index 89434ba98d5..07de9435835 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_tablegc)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_tablegc)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
deleted file mode 100644
index bf62b36b26b..00000000000
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (tabletmanager_tablegc) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_tablegc) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (tabletmanager_tablegc) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
index 8da831878a6..abe447d1355 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_throttler)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
index 2c72908622e..644869bf85a 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler_custom_config)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_throttler_custom_config)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_custom_config | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_custom_config
diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
index 49e4226c7e7..e336345a22a 100644
--- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml
+++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (topo_connection_cache)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (topo_connection_cache)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard topo_connection_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard topo_connection_cache
diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
index 3a09023463e..38f5c41dc4f 100644
--- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_across_db_versions)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_across_db_versions)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vreplication_across_db_versions | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_across_db_versions
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml
index 5b836239342..d1bbf7d6f6f 100644
--- a/.github/workflows/cluster_endtoend_vreplication_basic.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_basic)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_basic)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+    - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vreplication_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+          # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_basic
diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
index fadf85c0bfe..9b04223867c 100644
--- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_cellalias)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_cellalias)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+    - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vreplication_cellalias | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+          # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_cellalias
diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
index ac914da2a43..84b1bba8700 100644
--- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_migrate_vdiff2_convert_tz)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_migrate_vdiff2_convert_tz)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+    - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vreplication_migrate_vdiff2_convert_tz | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+          # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_migrate_vdiff2_convert_tz
diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml
index 96427323190..e76dfe09f11 100644
--- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_multicell)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_multicell)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+    - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+          # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell
diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml
index 7e81ef4d409..99dd499052d 100644
--- a/.github/workflows/cluster_endtoend_vreplication_v2.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_v2)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_v2)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vreplication_v2 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_v2
diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream_failover.yml
index 77d283a46dc..91836917c7f 100644
--- a/.github/workflows/cluster_endtoend_vstream_failover.yml
+++ b/.github/workflows/cluster_endtoend_vstream_failover.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_failover)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vstream_failover)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vstream_failover
diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
index 432a2c3ec39..2e35fe926b9 100644
--- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
+++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_false)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vstream_stoponreshard_false)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false
diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
index f91b8ad230f..4dfbf01a276 100644
--- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
+++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_true)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vstream_stoponreshard_true)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true
diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
index b270a350977..422a37acb8e 100644
--- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
+++ b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_with_keyspaces_to_watch)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch
diff --git a/.github/workflows/cluster_endtoend_vtbackup_transform.yml b/.github/workflows/cluster_endtoend_vtbackup_transform.yml
index e7d12c33484..f1ad6b591af 100644
--- a/.github/workflows/cluster_endtoend_vtbackup_transform.yml
+++ b/.github/workflows/cluster_endtoend_vtbackup_transform.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtbackup_transform)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtbackup_transform)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtbackup_transform | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtbackup_transform
diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
index 3b4f432c113..6f38198a173 100644
--- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtctlbackup_sharded_clustertest_heavy)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtctlbackup_sharded_clustertest_heavy)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtctlbackup_sharded_clustertest_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtctlbackup_sharded_clustertest_heavy
diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
index dee1595a8c0..c7c1c7a7f1c 100644
--- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_concurrentdml)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_concurrentdml)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_concurrentdml | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_concurrentdml
diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
index be3370466ff..d15b3c85e58 100644
--- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_gen4)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_gen4)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_gen4 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_gen4
diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
index 634fb1c8302..fba49ccb7ae 100644
--- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_general_heavy)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_general_heavy)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_general_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_general_heavy
diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
index bf2fc291e33..3fbb294a456 100644
--- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_godriver)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_godriver)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_godriver | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+        # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+        # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_godriver
diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
index d5436a25868..cb1d85783bb 100644
--- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_partial_keyspace)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_partial_keyspace)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_partial_keyspace -partial-keyspace=true | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_partial_keyspace -partial-keyspace=true
diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml
index e71808afb5c..1ddaa210267 100644
--- a/.github/workflows/cluster_endtoend_vtgate_queries.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_queries)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_queries)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_queries | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_queries
diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
index f0f488a736c..c2fd2c1bc01 100644
--- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_readafterwrite)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_readafterwrite)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_readafterwrite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_readafterwrite
diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
index ef1ea86d716..ffef1a2eb08 100644
--- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_reservedconn)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_reservedconn)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_reservedconn | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_reservedconn
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml
index 09272396754..19e613549c8 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_schema)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
index 912df7ba811..5189a2e71bc 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_schema_tracker)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema_tracker)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema_tracker | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema_tracker
diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
index aa2df3b6717..c691f8ddef2 100644
--- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_tablet_healthcheck_cache)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_tablet_healthcheck_cache)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_tablet_healthcheck_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_tablet_healthcheck_cache
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml
index b7a235f70d4..af7a5ee2068 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_topo)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
index 5ea52a9c797..38a1af44ae8 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_topo_consul)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo_consul)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -95,37 +96,20 @@ jobs:
run: |
make tools
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_consul
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
index 72a3f72e8ab..57462d2a8b1 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_topo_etcd)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo_etcd)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_etcd | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_etcd
diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
index d28fc5fe582..6dfd8eea041 100644
--- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_transaction)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_transaction)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_transaction | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_transaction
diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
index 2f8c871260a..8120034d999 100644
--- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_unsharded)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_unsharded)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_unsharded | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_unsharded
diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
index 6a8c96a8e69..a9ae49f086f 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_vindex_heavy)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vindex_heavy)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,57 +91,40 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_vindex_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_vindex_heavy
diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
index 7026c374fc6..907f4f09a55 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_vschema)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vschema)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,16 +71,17 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- # Setup Percona Server for MySQL 8.0
- sudo apt-get update
- sudo apt-get install -y lsb-release gnupg2 curl
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release setup ps80
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
# Install everything else we need, and configure
- sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,37 +91,20 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard vtgate_vschema | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_vschema
diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml
index 981446f6070..7bfc1cc64fa 100644
--- a/.github/workflows/cluster_endtoend_vtorc.yml
+++ b/.github/workflows/cluster_endtoend_vtorc.yml
@@ -1,76 +1,110 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (vtorc)(mysql80)
+name: Cluster (vtorc)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)(mysql80)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)')
cancel-in-progress: true
jobs:
build:
- name: Run endtoend tests on Cluster (vtorc)(mysql80)
- runs-on: self-hosted
+ name: Run endtoend tests on Cluster (vtorc)
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - '.github/docker/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_vtorc.yml'
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_vtorc.yml'
- - name: Build Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: docker build -f ./.github/docker/cluster_test_vtorc/Dockerfile -t cluster_test_vtorc:$GITHUB_SHA .
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.21.10
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: docker run --name "cluster_test_vtorc_$GITHUB_SHA" cluster_test_vtorc:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard vtorc -- -- --keep-data=true'
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- - name: Print Volume Used
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker inspect -f '{{ (index .Mounts 0).Name }}' cluster_test_vtorc_$GITHUB_SHA
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
- - name: Cleanup Docker Volume
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -v cluster_test_vtorc_$GITHUB_SHA
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
- - name: Cleanup Docker Container
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -f cluster_test_vtorc_$GITHUB_SHA
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
- - name: Cleanup Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker image rm cluster_test_vtorc:$GITHUB_SHA
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vtorc
diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
deleted file mode 100644
index cebe62cbe75..00000000000
--- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
+++ /dev/null
@@ -1,76 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (vtorc)(mysql57)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)(mysql57)')
- cancel-in-progress: true
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (vtorc)(mysql57)
- runs-on: self-hosted
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - '.github/docker/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_vtorc_mysql57.yml'
-
- - name: Build Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: docker build -f ./.github/docker/cluster_test_vtorc_mysql57/Dockerfile -t cluster_test_vtorc_mysql57:$GITHUB_SHA .
-
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: docker run --name "cluster_test_vtorc_mysql57_$GITHUB_SHA" cluster_test_vtorc_mysql57:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard vtorc -- -- --keep-data=true'
-
- - name: Print Volume Used
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker inspect -f '{{ (index .Mounts 0).Name }}' cluster_test_vtorc_mysql57_$GITHUB_SHA
-
- - name: Cleanup Docker Volume
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -v cluster_test_vtorc_mysql57_$GITHUB_SHA
-
- - name: Cleanup Docker Container
- if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker rm -f cluster_test_vtorc_mysql57_$GITHUB_SHA
-
- - name: Cleanup Docker Image
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- docker image rm cluster_test_vtorc_mysql57:$GITHUB_SHA
diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
new file mode 100644
index 00000000000..5543ba4bdef
--- /dev/null
+++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
@@ -0,0 +1,110 @@
+# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
+
+name: Cluster (vttablet_prscomplex)
+on: [push, pull_request]
+concurrency:
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vttablet_prscomplex)')
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Run endtoend tests on Cluster (vttablet_prscomplex)
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
+
+ steps:
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_vttablet_prscomplex.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do
+ eatmydata -- go run test.go -docker=false -follow -shard vttablet_prscomplex
diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml
index 8c9b344bf07..133705caac6 100644
--- a/.github/workflows/cluster_endtoend_xb_backup.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_backup)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (xb_backup)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,6 +71,7 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
+
# Setup Percona Server for MySQL 8.0
sudo apt-get update
sudo apt-get install -y lsb-release gnupg2 curl
@@ -81,6 +82,7 @@ jobs:
# Install everything else we need, and configure
sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,39 +92,22 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- sudo apt-get install percona-xtrabackup-80 lz4
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ sudo apt-get install -y percona-xtrabackup-80 lz4
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard xb_backup
diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
deleted file mode 100644
index e4bbc8c12b2..00000000000
--- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
+++ /dev/null
@@ -1,156 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (xb_backup) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_backup) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
- # This is used if we need to pin the xtrabackup version used in tests.
- # If this is NOT set then the latest version available will be used.
- #XTRABACKUP_VERSION: "2.4.24-1"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (xb_backup) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_xb_backup_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb"
- sudo apt-get install -y gnupg2
- sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb"
- sudo apt-get update
- if [[ -n $XTRABACKUP_VERSION ]]; then
- debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb"
- wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile"
- sudo apt install -y "./$debfile"
- else
- sudo apt-get install -y percona-xtrabackup-24
- fi
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml
index 88fcde631d1..025f0a77afc 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery.yml
@@ -6,30 +6,26 @@ concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_recovery)')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
jobs:
build:
name: Run endtoend tests on Cluster (xb_recovery)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -43,7 +39,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -52,13 +49,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -71,6 +71,7 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
+
# Setup Percona Server for MySQL 8.0
sudo apt-get update
sudo apt-get install -y lsb-release gnupg2 curl
@@ -81,6 +82,7 @@ jobs:
# Install everything else we need, and configure
sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,39 +92,22 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- sudo apt-get install percona-xtrabackup-80 lz4
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ sudo apt-get install -y percona-xtrabackup-80 lz4
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard xb_recovery
diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
deleted file mode 100644
index ae656927322..00000000000
--- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
+++ /dev/null
@@ -1,156 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Cluster (xb_recovery) mysql57
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_recovery) mysql57')
- cancel-in-progress: true
-
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
-
- # This is used if we need to pin the xtrabackup version used in tests.
- # If this is NOT set then the latest version available will be used.
- #XTRABACKUP_VERSION: "2.4.24-1"
-
-jobs:
- build:
- name: Run endtoend tests on Cluster (xb_recovery) mysql57
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- end_to_end:
- - 'go/**/*.go'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Set up python
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
-
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
- sudo service mysql stop
- sudo service etcd stop
-
- # install JUnit report formatter
- go install github.com/vitessio/go-junit-report@HEAD
-
- wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb"
- sudo apt-get install -y gnupg2
- sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb"
- sudo apt-get update
- if [[ -n $XTRABACKUP_VERSION ]]; then
- debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb"
- wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile"
- sudo apt install -y "./$debfile"
- else
- sudo apt-get install -y percona-xtrabackup-24
- fi
-
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- - name: Run cluster endtoend test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml
index 862b3f36234..969af82038d 100644
--- a/.github/workflows/codeql_analysis.yml
+++ b/.github/workflows/codeql_analysis.yml
@@ -39,9 +39,9 @@ jobs:
# queries: security-extended,security-and-quality
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
- name: Get base dependencies
run: |
@@ -55,8 +55,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -75,14 +75,29 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
- - name: Building last release's binaries
- timeout-minutes: 10
+ - name: Building binaries
+ timeout-minutes: 30
run: |
source build.env
make build
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
+
+ - name: Slack Workflow Notification
+ if: ${{ failure() }}
+ uses: Gamesight/slack-workflow-status@master
+ with:
+ repo_token: ${{secrets.GITHUB_TOKEN}}
+ slack_webhook_url: ${{secrets.SLACK_WEBHOOK_URL}}
+ channel: '#codeql'
+ name: 'CodeQL Workflows'
+
+ - name: Fail if needed
+ if: ${{ failure() }}
+ run: |
+ exit 1
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index b087c8e4716..f72254c232b 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -14,22 +14,21 @@ jobs:
steps:
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup node
+ uses: actions/setup-node@v3
+ with:
+ node-version: '18.16.0'
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies
run: |
diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml
index 5b62635be43..3e0ae302ef2 100644
--- a/.github/workflows/docker_test_cluster_10.yml
+++ b/.github/workflows/docker_test_cluster_10.yml
@@ -4,22 +4,23 @@ jobs:
build:
name: Docker Test Cluster 10
- runs-on: ubuntu-latest
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip $skip
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -33,7 +34,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -43,9 +45,12 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -53,14 +58,12 @@ jobs:
echo "value: " ${{steps.skip-workflow.outputs.skip-workflow}}
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Run tests which require docker - 1
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- go run test.go -docker=true --follow -shard 10
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ go run test.go -docker=true --follow -shard 10 -bootstrap-version 14.1
diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml
index 28fec63703a..339ca70ddda 100644
--- a/.github/workflows/docker_test_cluster_25.yml
+++ b/.github/workflows/docker_test_cluster_25.yml
@@ -4,22 +4,23 @@ jobs:
build:
name: Docker Test Cluster 25
- runs-on: ubuntu-latest
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -33,7 +34,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -43,23 +45,24 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Run tests which require docker - 2
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- go run test.go -docker=true --follow -shard 25
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ go run test.go -docker=true --follow -shard 25
diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml
index 4e1a0890ead..53beadb2206 100644
--- a/.github/workflows/e2e_race.yml
+++ b/.github/workflows/e2e_race.yml
@@ -4,21 +4,22 @@ jobs:
build:
name: End-to-End Test (Race)
- runs-on: ubuntu-latest
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -32,7 +33,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -41,9 +43,12 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -53,15 +58,16 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# Setup MySQL 8.0
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
+
# Install everything else we need, and configure
- sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -75,6 +81,10 @@ jobs:
- name: e2e_race
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: |
- make e2e_test_race
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ make e2e_test_race
diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml
index 30eb503733b..57b3f593fb6 100644
--- a/.github/workflows/endtoend.yml
+++ b/.github/workflows/endtoend.yml
@@ -4,21 +4,22 @@ jobs:
build:
name: End-to-End Test
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -32,7 +33,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -41,22 +43,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
@@ -80,6 +78,14 @@ jobs:
- name: endtoend
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: |
- eatmydata -- tools/e2e_test_runner.sh
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ eatmydata -- tools/e2e_test_runner.sh
diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml
index 71ae05a7b2f..4f68c7af47a 100644
--- a/.github/workflows/local_example.yml
+++ b/.github/workflows/local_example.yml
@@ -5,6 +5,9 @@ jobs:
build:
name: Local example using ${{ matrix.topo }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
strategy:
matrix:
os: [ubuntu-latest]
@@ -15,15 +18,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +37,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -47,9 +48,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - uses: actions/setup-node@v3
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
with:
- go-version: 1.18.5
+ # node-version should match package.json
+ node-version: '18.16.0'
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
@@ -60,13 +70,14 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
run: |
if [ ${{matrix.os}} = "ubuntu-latest" ]; then
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# Setup MySQL 8.0
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
-
+
# Install everything else we need, and configure
sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata
sudo service mysql stop
@@ -90,10 +101,14 @@ jobs:
- name: local_example
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
- timeout-minutes: 30
- run: |
- export TOPO=${{matrix.topo}}
- if [ ${{matrix.os}} = "macos-latest" ]; then
- export PATH="/usr/local/opt/mysql@5.7/bin:$PATH"
- fi
- eatmydata -- go run test.go -print-log -follow -retry=1 local_example
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ export TOPO=${{matrix.topo}}
+ if [ ${{matrix.os}} = "macos-latest" ]; then
+ export PATH="/usr/local/opt/mysql@5.7/bin:$PATH"
+ fi
+ eatmydata -- go run test.go -print-log -follow -retry=1 local_example
diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml
index ef69e85bbc1..f07db3c65b6 100644
--- a/.github/workflows/region_example.yml
+++ b/.github/workflows/region_example.yml
@@ -5,6 +5,9 @@ jobs:
build:
name: Region Sharding example using ${{ matrix.topo }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
strategy:
matrix:
os: [ubuntu-latest]
@@ -15,15 +18,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +37,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -47,9 +48,18 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
+ with:
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - uses: actions/setup-node@v3
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
with:
- go-version: 1.18.5
+ # node-version should match package.json
+ node-version: '18.16.0'
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
@@ -60,9 +70,10 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
run: |
if [ ${{matrix.os}} = "ubuntu-latest" ]; then
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# Setup MySQL 8.0
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -90,11 +101,15 @@ jobs:
- name: region_example
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
- timeout-minutes: 30
- run: |
- export TOPO=${{matrix.topo}}
- if [ ${{matrix.os}} = "macos-latest" ]; then
- export PATH="/usr/local/opt/mysql@5.7/bin:$PATH"
- fi
- sed -i 's/user\/my-vitess/runner\/work\/vitess\/vitess/g' examples/region_sharding/main_vschema_sharded.json #set correct path to countries.json
- eatmydata -- go run test.go -print-log -follow -retry=1 region_example
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ export TOPO=${{matrix.topo}}
+ if [ ${{matrix.os}} = "macos-latest" ]; then
+ export PATH="/usr/local/opt/mysql@5.7/bin:$PATH"
+ fi
+ sed -i 's/user\/my-vitess/runner\/work\/vitess\/vitess/g' examples/region_sharding/main_vschema_sharded.json #set correct path to countries.json
+ eatmydata -- go run test.go -print-log -follow -retry=1 region_example
diff --git a/.github/workflows/release_notes_label.yml b/.github/workflows/release_notes_label.yml
deleted file mode 100644
index 220bd07da30..00000000000
--- a/.github/workflows/release_notes_label.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Check Pull Request labels
-on:
- pull_request:
- types: [opened, labeled, unlabeled, synchronize]
-
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Check Pull Request labels')
- cancel-in-progress: true
-
-jobs:
- check_pull_request_labels:
- name: Check Pull Request labels
- timeout-minutes: 10
- runs-on: ubuntu-latest
- if: github.repository == 'vitessio/vitess'
- steps:
- - uses: mheap/github-action-required-labels@v1
- name: Check release notes label
- id: required_label
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- mode: exactly
- count: 0
- labels: "release notes (needs details)"
-
- - name: Print helper
- if: failure() && steps.required_label.outcome == 'failure'
- run: |
- echo The "release notes (needs details)" label is set. The changes made in this Pull Request need to be documented in the release notes summary "('./doc/releasenotes/15_0_0_summary.md')". Once documented, the "release notes (needs details)" label can be removed.
- exit 1
-
- - name: Check type and component labels
- env:
- PR_NUMBER: ${{ github.event.pull_request.number }}
- run: |
- LABELS_JSON="/tmp/labels.json"
- # Get labels for this pull request
- curl -s \
- -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- -H "Accept: application/vnd.github.v3+json" \
- -H "Content-type: application/json" \
- "https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${PR_NUMBER}/labels" \
- > "$LABELS_JSON"
- if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Component:' ; then
- echo "Expecting PR to have label 'Component: ...'"
- exit 1
- fi
- if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Type:' ; then
- echo "Expecting PR to have label 'Type: ...'"
- exit 1
- fi
diff --git a/.github/workflows/sonar_analysis.yml b/.github/workflows/sonar_analysis.yml
index 871e6cc7f83..0a6cb9d186c 100644
--- a/.github/workflows/sonar_analysis.yml
+++ b/.github/workflows/sonar_analysis.yml
@@ -10,22 +10,16 @@ jobs:
steps:
- name: Set up Go
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.13
+ go-version: 1.21.10
- name: Tune the OS
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies
run: |
diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml
index 2f62e4c2c6e..c53e0b15756 100644
--- a/.github/workflows/static_checks_etc.yml
+++ b/.github/workflows/static_checks_etc.yml
@@ -7,28 +7,23 @@ on:
jobs:
build:
name: Static Code Checks Etc
- runs-on: ubuntu-latest
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Checkout code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Run FOSSA scan and upload build data
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: fossa-contrib/fossa-action@v1
- with:
- fossa-api-key: 76d7483ea206d530d9452e44bffe7ba8
+ uses: actions/checkout@v3
- name: Check for changes in Go files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -40,11 +35,12 @@ jobs:
go_files:
- '**/*.go'
- '*.go'
- - 'go.[sumod]'
- - '.github/workflows/static_checks_etc.yml'
+ - 'go.sum'
+ - 'go.mod'
parser_changes:
- 'go/vt/sqlparser/**'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'build.env'
- 'bootstrap.sh'
- 'tools/**'
@@ -53,7 +49,8 @@ jobs:
- 'bootstrap.sh'
- 'tools/**'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'Makefile'
- 'go/vt/proto/**'
- 'proto/*.proto'
@@ -63,7 +60,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'tools/**'
- 'bootstrap.sh'
- '.github/workflows/static_checks_etc.yml'
@@ -72,7 +70,8 @@ jobs:
- 'go/vt/sqlparser/**'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'tools/**'
- 'bootstrap.sh'
- 'misc/git/hooks/asthelpers'
@@ -83,33 +82,35 @@ jobs:
- 'Makefile'
- 'bootstrap.sh'
- '.github/workflows/static_checks_etc.yml'
-
+ ci_config:
+ - 'test/config.json'
+ - '.github/workflows/static_checks_etc.yml'
+ release_notes:
+ - 'changelog/**'
+ - './go/tools/releases/**'
+ - '.github/workflows/static_checks_etc.yml'
- name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true'
- uses: actions/setup-go@v2
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Run go fmt
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
run: |
gofmt -l . | grep -vF vendor/ && exit 1 || echo "All files formatted correctly"
- name: Install goimports
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' || steps.changes.outputs.visitor == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.visitor == 'true')
run: |
go install golang.org/x/tools/cmd/goimports@latest
@@ -120,7 +121,7 @@ jobs:
echo $out | grep go > /dev/null && echo -e "The following files are malformatted:\n$out" && exit 1 || echo "All the files are formatted correctly"
- name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.parser_changes == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.go_files == 'true')
run: |
sudo apt-get update
sudo apt-get install -y make unzip g++ etcd curl git wget
@@ -128,24 +129,24 @@ jobs:
go mod download
- name: Run make minimaltools
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.parser_changes == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.go_files == 'true')
run: |
make minimaltools
- name: check_make_parser
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.parser_changes == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.go_files == 'true')
run: |
- tools/check_make_parser.sh
+ tools/check_make_parser.sh || exit 1
- name: check_make_sizegen
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.sizegen == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.sizegen == 'true' || steps.changes.outputs.go_files == 'true')
run: |
- tools/check_make_sizegen.sh
+ tools/check_make_sizegen.sh || exit 1
- name: check_make_visitor
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.visitor == 'true'
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.visitor == 'true' || steps.changes.outputs.go_files == 'true')
run: |
- misc/git/hooks/asthelpers
+ misc/git/hooks/asthelpers || exit 1
- name: run ensure_bootstrap_version
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -156,7 +157,7 @@ jobs:
- name: Install golangci-lint
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
- run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2
+ run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2
- name: Clean Env
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
@@ -168,7 +169,7 @@ jobs:
- name: Run golangci-lint
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
- run: $(go env GOPATH)/bin/golangci-lint run go/...
+ run: $(go env GOPATH)/bin/golangci-lint run go/... --timeout 10m || exit 1
- name: Run go mod tidy
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
@@ -187,4 +188,24 @@ jobs:
- name: check_make_proto
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
run: |
- tools/check_make_proto.sh
+ tools/check_make_proto.sh || exit 1
+
+ - name: Check test/config.json
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.ci_config == 'true')
+ run: |
+ go run ./go/tools/ci-config/main.go || exit 1
+
+ - name: Check changelog
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.release_notes == 'true'
+ run: |
+ set -e
+ go run ./go/tools/releases/releases.go
+ output=$(git status -s)
+ if [ -z "${output}" ]; then
+ exit 0
+ fi
+ echo 'We wish to maintain a consistent changelog directory, please run `go run ./go/tools/releases/releases.go`, commit and push again.'
+ echo 'Running `go run ./go/tools/releases/releases.go` on CI yields the following changes:'
+ echo "$output"
+ echo ""
+ exit 1
diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml
index 82aa3ae6da4..734cec15ebf 100644
--- a/.github/workflows/unit_race.yml
+++ b/.github/workflows/unit_race.yml
@@ -8,21 +8,22 @@ jobs:
build:
name: Unit Test (Race)
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -36,7 +37,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -45,9 +47,12 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
@@ -83,6 +88,15 @@ jobs:
- name: unit_race
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- timeout-minutes: 30
- run: |
- eatmydata -- make unit_test_race
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ export NOVTADMINBUILD=1
+
+ eatmydata -- make unit_test_race
diff --git a/.github/workflows/unit_test_mariadb103.yml b/.github/workflows/unit_test_mariadb103.yml
deleted file mode 100644
index d8af0b6fd6d..00000000000
--- a/.github/workflows/unit_test_mariadb103.yml
+++ /dev/null
@@ -1,105 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Unit Test (mariadb103)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mariadb103)')
- cancel-in-progress: true
-
-jobs:
- test:
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- unit_tests:
- - 'go/**'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/unit_test_mariadb103.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- export DEBIAN_FRONTEND="noninteractive"
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # mariadb103
- sudo apt-get install -y software-properties-common
- sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
- sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] https://mirror.rackspace.com/mariadb/repo/10.3/ubuntu bionic main'
- sudo apt update
- sudo DEBIAN_FRONTEND="noninteractive" apt install -y mariadb-server
-
- sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata
- sudo service mysql stop
- sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile"
-
- mkdir -p dist bin
- curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist
- mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/
-
- go mod download
- go install golang.org/x/tools/cmd/goimports@latest
-
- - name: Run make tools
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- make tools
-
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- timeout-minutes: 30
- run: |
- eatmydata -- make unit_test
diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml
deleted file mode 100644
index 5e154874911..00000000000
--- a/.github/workflows/unit_test_mysql57.yml
+++ /dev/null
@@ -1,111 +0,0 @@
-# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-
-name: Unit Test (mysql57)
-on: [push, pull_request]
-concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mysql57)')
- cancel-in-progress: true
-
-jobs:
- test:
- runs-on: ubuntu-20.04
-
- steps:
- - name: Check if workflow needs to be skipped
- id: skip-workflow
- run: |
- skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
- echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
-
- - name: Check out code
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
-
- - name: Check for changes in relevant files
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: frouioui/paths-filter@main
- id: changes
- with:
- token: ''
- filters: |
- unit_tests:
- - 'go/**'
- - 'test.go'
- - 'Makefile'
- - 'build.env'
- - 'go.[sumod]'
- - 'proto/*.proto'
- - 'tools/**'
- - 'config/**'
- - 'bootstrap.sh'
- - '.github/workflows/unit_test_mysql57.yml'
-
- - name: Set up Go
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
- with:
- go-version: 1.18.5
-
- - name: Tune the OS
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
- echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
- sudo sysctl -p /etc/sysctl.conf
-
- - name: Get dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- export DEBIAN_FRONTEND="noninteractive"
- sudo apt-get update
-
- # Uninstall any previously installed MySQL first
- sudo systemctl stop apparmor
- sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
- sudo apt-get -y autoremove
- sudo apt-get -y autoclean
- sudo deluser mysql
- sudo rm -rf /var/lib/mysql
- sudo rm -rf /etc/mysql
-
- # Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
-
- # mysql57
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
- echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- sudo apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7*
-
- sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata
- sudo service mysql stop
- sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263
- sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
- sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile"
-
- mkdir -p dist bin
- curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist
- mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/
-
- go mod download
- go install golang.org/x/tools/cmd/goimports@latest
-
- - name: Run make tools
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- run: |
- make tools
-
- - name: Run test
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- timeout-minutes: 30
- run: |
- eatmydata -- make unit_test
diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml
index 04db046170f..1ff7c264c3f 100644
--- a/.github/workflows/unit_test_mysql80.yml
+++ b/.github/workflows/unit_test_mysql80.yml
@@ -8,22 +8,23 @@ concurrency:
jobs:
test:
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -37,7 +38,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -46,9 +48,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
@@ -74,10 +83,10 @@ jobs:
sudo rm -rf /etc/mysql
# Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# mysql80
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -103,6 +112,16 @@ jobs:
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- timeout-minutes: 30
- run: |
- eatmydata -- make unit_test
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ set -exo pipefail
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ export NOVTADMINBUILD=1
+ eatmydata -- make unit_test
diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml
new file mode 100644
index 00000000000..77ded0a6750
--- /dev/null
+++ b/.github/workflows/update_golang_version.yml
@@ -0,0 +1,81 @@
+name: Update Golang Version
+
+on:
+ schedule:
+ - cron: "0 0 * * *" # Runs every day at midnight UTC
+ workflow_dispatch:
+
+jobs:
+ update_golang_version:
+ strategy:
+ matrix:
+ branch: [ main, release-16.0, release-15.0, release-14.0 ]
+ name: Update Golang Version
+ runs-on: ubuntu-latest
+ steps:
+ - name: Set up Go
+ uses: actions/setup-go@v2
+ with:
+ go-version: 1.21.10
+
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ ref: ${{ matrix.branch }}
+
+ - name: Detect new version and update codebase
+ id: detect-and-update
+ run: |
+ old_go_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get go-version)
+ echo "old-go-version=${old_go_version}" >> $GITHUB_OUTPUT
+
+ if [ ${{ matrix.branch }} == "main" ]; then
+ go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false --main --allow-major-upgrade
+ else
+ go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false
+ fi
+
+ output=$(git status -s)
+ if [ -z "${output}" ]; then
+ exit 0
+ fi
+
+ go_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get go-version)
+ bootstrap_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get bootstrap-version)
+ echo "go-version=${go_version}" >> $GITHUB_OUTPUT
+ echo "bootstrap-version=${bootstrap_version}" >> $GITHUB_OUTPUT
+
+ # Check if the PR already exists, if it does then do not create new PR.
+ gh pr list -S "is:open [${{ matrix.branch }}] Upgrade the Golang version to go${go_version}" | grep "OPEN"
+ if [ $? -eq 0 ]; then
+ exit 0
+ fi
+
+ echo "create-pr=true" >> $GITHUB_OUTPUT
+
+ - name: Create Pull Request
+ if: steps.detect-and-update.outputs.create-pr == 'true'
+ uses: peter-evans/create-pull-request@v4
+ with:
+ branch: "upgrade-go-to-${{steps.detect-and-update.outputs.go-version}}-on-${{ matrix.branch }}"
+ commit-message: "bump go version to go${{steps.detect-and-update.outputs.go-version}}"
+ signoff: true
+ delete-branch: true
+ title: "[${{ matrix.branch }}] Upgrade the Golang version to `go${{steps.detect-and-update.outputs.go-version}}`"
+ body: |
+ This Pull Request bumps the Golang version to `go${{steps.detect-and-update.outputs.go-version}}` and the bootstrap version to `${{steps.detect-and-update.outputs.bootstrap-version}}`.
+
+ > Do not trust the bot blindly. A thorough code review must be done to ensure all the files have been correctly modified.
+
+ There are a few manual steps remaining:
+ - [ ] Make sure you update the Golang version used in the previous and next release branches for the Upgrade/Downgrade tests.
+ - [ ] Build and Push the bootstrap images to Docker Hub, the bot cannot handle that.
+ - [ ] Update the `./.github/workflows/*.yml` files with the newer Golang version, the bot cannot handle that due to permissions.
+ - To accomplish this, run the following: `go run ./go/tools/go-upgrade/go-upgrade.go upgrade workflows --go-to=${{steps.detect-and-update.outputs.go-version}}`
+ base: ${{ matrix.branch }}
+ labels: |
+ Skip CI
+ go
+ Benchmark me
+ Component: General
+ Type: CI/Build
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
index 49b07e77cab..2853f76f45b 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
+ name: Get Previous Release - Backups - E2E
runs-on: ubuntu-latest
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,13 +26,17 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test_e2e:
timeout-minutes: 60
if: always() && needs.get_previous_release.result == 'success'
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - E2E
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_previous_release
@@ -41,15 +45,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -64,7 +65,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -73,13 +75,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -103,13 +108,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -131,7 +137,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -160,12 +166,17 @@ jobs:
# Run test with VTTablet at version N-1 and VTBackup at version N
- name: Run backups tests (vttablet=N-1, vtbackup=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
- set -x
- source build.env
- eatmydata -- go run test.go -skip-build -docker=false -print-log -follow -tag upgrade_downgrade_backups
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+ set -x
+ source build.env
+ eatmydata -- go run test.go -skip-build -docker=false -print-log -follow -tag upgrade_downgrade_backups
# Swap binaries again, use current version's VTTablet, and last release's VTBackup
- name: Use current version VTTablet, and other version VTBackup
@@ -182,9 +193,14 @@ jobs:
# Run test again with VTTablet at version N, and VTBackup at version N-1
- name: Run backups tests (vttablet=N, vtbackup=N-1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
- set -x
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_backups
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+ set -x
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_backups
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
index 882b09f3f53..4f05cf9144b 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
+ name: Get Latest Release - Backups - E2E - Next Release
runs-on: ubuntu-latest
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,13 +26,17 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test_e2e:
timeout-minutes: 60
if: always() && needs.get_next_release.result == 'success'
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - E2E - Next Release
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_next_release
@@ -41,18 +45,15 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -67,7 +68,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -76,13 +78,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -106,13 +111,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -134,7 +140,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -163,12 +169,17 @@ jobs:
# Run test with VTTablet at version N+1 and VTBackup at version N
- name: Run backups tests (vttablet=N+1, vtbackup=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
- set -x
- source build.env
- eatmydata -- go run test.go -skip-build -docker=false -print-log -follow -tag upgrade_downgrade_backups
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+ set -x
+ source build.env
+ eatmydata -- go run test.go -skip-build -docker=false -print-log -follow -tag upgrade_downgrade_backups
# Swap binaries again, use current version's VTTablet, and next release's VTBackup
- name: Use current version VTTablet, and other version VTBackup
@@ -185,9 +196,14 @@ jobs:
# Run test again with VTTablet at version N, and VTBackup at version N+1
- name: Run backups tests (vttablet=N, vtbackup=N+1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
- set -x
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_backups
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+ set -x
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_backups
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
index 4abc49e18e4..e5a6603a3e8 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get a recent LTS release
+ name: Get Previous Release - Backups - Manual
runs-on: ubuntu-20.04
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,14 +26,18 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
# This job usually execute in ± 20 minutes
upgrade_downgrade_test_manual:
timeout-minutes: 40
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - Manual
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_previous_release
@@ -42,16 +46,13 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +67,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,26 +77,22 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
@@ -109,26 +107,12 @@ jobs:
sudo rm -rf /etc/mysql
# Install MySQL 8.0
- ####
- ## Temporarily pin the MySQL version at 8.0.29 as Vitess 14.0.1 does not have the fix to support
- ## backups of 8.0.30+. See: https://github.com/vitessio/vitess/pull/10847
- ## TODO: remove this pin once the above fixes are included in a v14 release (will be in v14.0.2) OR
- ## Vitess 16.0.0-SNAPSHOT becomes the dev version on vitessio/main
- #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- #wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
- #echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
- #sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- #sudo apt-get update
- #sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
- ####
- wget -c https://cdn.mysql.com/archives/mysql-8.0/mysql-common_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-plugins_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-client_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client_8.0.28-1ubuntu20.04_amd64.deb
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y ./mysql-*.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
# Install everything else we need, and configure
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata grep
@@ -144,13 +128,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -172,7 +157,7 @@ jobs:
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
index 7adb54aaabb..38a2236e74c 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
@@ -10,14 +10,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get a recent LTS release
+    name: Get Next Release - Backups - Manual - Next Release
runs-on: ubuntu-20.04
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -26,14 +26,18 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
# This job usually execute in ± 20 minutes
upgrade_downgrade_test_manual:
timeout-minutes: 40
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-20.04
+ name: Run Upgrade Downgrade Test - Backups - Manual - Next Release
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_next_release
@@ -42,19 +46,16 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +70,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,26 +80,22 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+      - name: Setup github.com/slackhq/vitess-addons access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range
- # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185
- - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts
- # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED!
-
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
@@ -112,26 +110,12 @@ jobs:
sudo rm -rf /etc/mysql
# Install MySQL 8.0
- ####
- ## Temporarily pin the MySQL version at 8.0.29 as Vitess 14.0.1 does not have the fix to support
- ## backups of 8.0.30+. See: https://github.com/vitessio/vitess/pull/10847
- ## TODO: remove this pin once the above fixes are included in a v14 release (will be in v14.0.2) OR
- ## Vitess 16.0.0-SNAPSHOT becomes the dev version on vitessio/main
- #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- #wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
- #echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
- #sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
- #sudo apt-get update
- #sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
- ####
- wget -c https://cdn.mysql.com/archives/mysql-8.0/mysql-common_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client-plugins_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-client_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server-core_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-server_8.0.28-1ubuntu20.04_amd64.deb \
- https://cdn.mysql.com/archives/mysql-8.0/mysql-community-client_8.0.28-1ubuntu20.04_amd64.deb
- sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y ./mysql-*.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
# Install everything else we need, and configure
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata grep
@@ -147,13 +131,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -175,7 +160,7 @@ jobs:
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
index b4965b9c254..204ef44f4fa 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
+ name: Get Previous Release - Query Serving (Queries)
runs-on: ubuntu-latest
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Queries)
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_previous_release
@@ -43,15 +47,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +67,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,13 +77,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -101,8 +106,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -121,13 +126,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +155,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -173,7 +179,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
- name: Use last release's VTGate
@@ -188,12 +194,17 @@ jobs:
# Running a test with vtgate at version n-1 and vttablet at version n
- name: Run query serving tests (vtgate=N-1, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
- name: Use current version VTGate, and other version VTTablet
@@ -210,9 +221,14 @@ jobs:
# Running a test with vtgate at version n and vttablet at version n-1
- name: Run query serving tests (vtgate=N, vttablet=N-1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
index 56a86fdd8fc..6a6f6850552 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
+ name: Get Latest Release - Query Serving (Queries) Next Release
runs-on: ubuntu-latest
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_next_release
@@ -43,18 +47,15 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +70,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,13 +80,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,8 +109,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -124,13 +129,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +158,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -171,12 +177,17 @@ jobs:
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
- name: Use next release's VTGate
@@ -196,7 +207,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
- name: Use current version VTGate, and other version VTTablet
@@ -213,9 +224,14 @@ jobs:
# Running a test with vtgate at version n and vttablet at version n+1
- name: Run query serving tests (vtgate=N, vttablet=N+1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
index b3399dd3845..e4787a5facf 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
+ name: Get Previous Release - Query Serving (Schema)
runs-on: ubuntu-latest
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Query Serving (Schema)
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_previous_release
@@ -43,15 +47,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +67,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,13 +77,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -101,8 +106,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -121,13 +126,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +155,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -168,12 +174,17 @@ jobs:
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
- name: Use last release's VTGate
@@ -193,7 +204,7 @@ jobs:
mkdir -p /tmp/vtdataroot
source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
- name: Use current version VTGate, and other version VTTablet
@@ -210,9 +221,14 @@ jobs:
# Running a test with vtgate at version n and vttablet at version n-1
- name: Run query serving tests (vtgate=N, vttablet=N-1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
index c0b1052ab23..3c1b3d476f2 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
+ name: Get Latest Release - Query Serving (Schema) Next Release
runs-on: ubuntu-latest
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,15 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
+ name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release
runs-on: ubuntu-latest
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_next_release
@@ -43,18 +46,15 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +69,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,13 +79,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,8 +108,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -124,13 +128,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +157,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -171,12 +176,17 @@ jobs:
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
- name: Use next release's VTGate
@@ -191,12 +201,17 @@ jobs:
# Running a test with vtgate at version n+1 and vttablet at version n
- name: Run query serving tests (vtgate=N+1, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
- name: Use current version VTGate, and other version VTTablet
@@ -213,9 +228,14 @@ jobs:
# Running a test with vtgate at version n and vttablet at version n+1
- name: Run query serving tests (vtgate=N, vttablet=N+1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
index 57392335000..5eb8f222587 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
+ name: Get Latest Release - Reparent New Vtctl
runs-on: ubuntu-latest
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent New Vtctl
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_next_release
@@ -43,18 +47,15 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +70,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,13 +80,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,8 +109,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -124,13 +129,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +158,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -185,9 +191,14 @@ jobs:
# Running a test with vtctl at version n+1 and vttablet at version n
- name: Run reparent tests (vtctl=N+1, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
index 99090a640b8..d63e2fa2d56 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_next_release:
if: always()
- name: Get latest release
+ name: Get Latest Release - Reparent New VTTablet
runs-on: ubuntu-latest
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
- echo "::set-output name=next_release_ref::${next_release_ref}"
+ echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent New VTTablet
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_next_release
@@ -43,18 +47,15 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -69,7 +70,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -78,13 +80,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,8 +109,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -124,13 +129,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_next_release.outputs.next_release }}
@@ -152,7 +158,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -182,9 +188,14 @@ jobs:
# Running a test with vtctl at version n and vttablet at version n+1
- name: Run reparent tests (vtctl=N, vttablet=N+1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
+
+ source build.env
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
index a1fc737a459..3f8ac4c3045 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
+ name: Get Previous Release - Reparent Old Vtctl
runs-on: ubuntu-latest
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent Old Vtctl
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_previous_release
@@ -43,15 +47,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +67,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,13 +77,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -101,8 +106,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -121,13 +126,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +155,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -182,9 +188,18 @@ jobs:
# Running a test with vtctl at version n-1 and vttablet at version n
- name: Run reparent tests (vtctl=N-1, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ source build.env
+
+ # skip TestCrossCellDurability test on v14 (as previous). It doesn't setup semi-sync the way this test (from v16) expects
+ export SKIPTESTCROSSCELLDURABILITY=1
+
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
index 96eee1fbb99..4166ec2bb7c 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
@@ -13,14 +13,14 @@ concurrency:
jobs:
get_previous_release:
if: always()
- name: Get latest release
+ name: Get Previous Release - Reparent Old VTTablet
runs-on: ubuntu-latest
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
steps:
- name: Check out to HEAD
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -29,12 +29,16 @@ jobs:
run: |
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}})
echo $previous_release_ref
- echo "::set-output name=previous_release_ref::${previous_release_ref}"
+ echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
- name: Run Upgrade Downgrade Test
- runs-on: ubuntu-latest
+ name: Run Upgrade Downgrade Test - Reparent Old VTTablet
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
needs:
- get_previous_release
@@ -43,15 +47,12 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -66,7 +67,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -75,13 +77,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -101,8 +106,8 @@ jobs:
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install mysql80
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -121,13 +126,14 @@ jobs:
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release enable-only pxb-24
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
ref: ${{ needs.get_previous_release.outputs.previous_release }}
@@ -149,7 +155,7 @@ jobs:
# Checkout to this build's commit
- name: Check out commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -179,9 +185,18 @@ jobs:
# Running a test with vtctl at version n and vttablet at version n-1
- name: Run reparent tests (vtctl=N, vttablet=N-1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ rm -rf /tmp/vtdataroot
+ mkdir -p /tmp/vtdataroot
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data -docker=false -print-log -follow -tag upgrade_downgrade_reparent
+ source build.env
+
+ # skip TestCrossCellDurability test on v14 (as previous). It doesn't setup semi-sync the way this test (from v16) expects
+ export SKIPTESTCROSSCELLDURABILITY=1
+
+ eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_reparent
diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml
index 97a5ec9eca6..caeb7b963b5 100644
--- a/.github/workflows/vtadmin_web_build.yml
+++ b/.github/workflows/vtadmin_web_build.yml
@@ -20,13 +20,10 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/setup-node@v2
diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml
index 02071d72e53..050b3657ee5 100644
--- a/.github/workflows/vtadmin_web_lint.yml
+++ b/.github/workflows/vtadmin_web_lint.yml
@@ -20,13 +20,10 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/setup-node@v2
diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml
index 21172a25759..e3654e244c9 100644
--- a/.github/workflows/vtadmin_web_unit_tests.yml
+++ b/.github/workflows/vtadmin_web_unit_tests.yml
@@ -20,13 +20,10 @@ jobs:
id: skip-workflow
run: |
skip='false'
- if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/setup-node@v2
diff --git a/.gitignore b/.gitignore
index 6e48b17eca9..b84eb6958a2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -70,7 +70,7 @@ _test/
/vendor/*/
# release folder
-releases
+/releases/
# Angular2 Bower Libs
/web/vtctld2/.bowerrc~
@@ -88,3 +88,6 @@ venv
.scannerwork
report
+
+# plan test output
+/go/vt/vtgate/planbuilder/testdata/plan_test*
diff --git a/.golangci.yml b/.golangci.yml
index f57fb88dea3..e2bdb5336e4 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,5 @@
run:
- go: 1.18
+ go: 1.19
timeout: 10m
skip-dirs:
- go/vt/topo/k8stopo/client
@@ -18,13 +18,10 @@ linters:
disable-all: true
enable:
# Defaults
- - deadcode
- errcheck
- govet
- ineffassign
- - structcheck
- typecheck
- - varcheck
- staticcheck
- gosimple
@@ -32,6 +29,7 @@ linters:
- gofmt
- goimports
- exportloopref
+ - bodyclose
# revive is a replacement for golint, but we do not run it in CI for now.
# This is only enabled as a post-commit hook
@@ -44,10 +42,6 @@ issues:
- errcheck
- goimports
- - path: '^go/vt/vtadmin/cache/'
- linters:
- - structcheck
-
### BEGIN: errcheck exclusion rules. Each rule should be considered
# a TODO for removal after adding error checks to that package/file/etc,
# except where otherwise noted.
@@ -160,4 +154,4 @@ issues:
# https://github.com/golangci/golangci/wiki/Configuration
service:
- golangci-lint-version: 1.46.2 # use the fixed version to not introduce new linters unexpectedly
+ golangci-lint-version: 1.51.2 # use the fixed version to not introduce new linters unexpectedly
diff --git a/Makefile b/Makefile
index 959965fae99..a8073167a1b 100644
--- a/Makefile
+++ b/Makefile
@@ -92,12 +92,6 @@ endif
-ldflags "$(shell tools/build_version_flags.sh)" \
-o ${VTROOTBIN} ./go/...
- # build vtorc with CGO, because it depends on sqlite
- CGO_ENABLED=1 go build \
- -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \
- -ldflags "$(shell tools/build_version_flags.sh)" \
- -o ${VTROOTBIN} ./go/cmd/vtorc/...
-
# cross-build can be used to cross-compile Vitess client binaries
# Outside of select client binaries (namely vtctlclient & vtexplain), cross-compiled Vitess Binaries are not recommended for production deployments
# Usage: GOOS=darwin GOARCH=amd64 make cross-build
@@ -120,8 +114,6 @@ endif
echo "Missing vttablet at: ${VTROOTBIN}/${GOOS}_${GOARCH}." && exit; \
fi
- # Cross-compiling w/ cgo isn't trivial and we don't need vtorc, so we can skip building it
-
debug:
ifndef NOBANNER
echo $$(date): Building source tree
@@ -145,8 +137,7 @@ install: build
cross-install: cross-build
# binaries
mkdir -p "$${PREFIX}/bin"
- # Still no vtorc for cross-compile
- cp "${VTROOTBIN}/${GOOS}_${GOARCH}/"{mysqlctl,mysqlctld,vtadmin,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtbackup} "$${PREFIX}/bin/"
+ cp "${VTROOTBIN}/${GOOS}_${GOARCH}/"{mysqlctl,mysqlctld,vtorc,vtadmin,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtbackup} "$${PREFIX}/bin/"
# Install local install the binaries needed to run vitess locally
# Usage: make install-local PREFIX=/path/to/install/root
@@ -292,7 +283,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
# This rule builds the bootstrap images for all flavors.
DOCKER_IMAGES_FOR_TEST = mariadb mariadb103 mysql57 mysql80 percona57 percona80
DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST)
-BOOTSTRAP_VERSION=11
+BOOTSTRAP_VERSION=21
ensure_bootstrap_version:
find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \;
sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go
@@ -490,8 +481,5 @@ vtadmin_authz_testgen:
generate_ci_workflows:
cd test && go run ci_workflow_gen.go && cd ..
-release-notes:
- go run ./go/tools/release-notes --from "$(FROM)" --to "$(TO)" --version "$(VERSION)" --summary "$(SUMMARY)"
-
install_kubectl_kind:
./tools/get_kubectl_kind.sh
diff --git a/build.env b/build.env
index 5a37f4f41bc..5bbc769419a 100755
--- a/build.env
+++ b/build.env
@@ -17,7 +17,7 @@
source ./tools/shell_functions.inc
go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions."
-goversion_min 1.18.5 || fail "Go version reported: `go version`. Version 1.18.5+ required. See https://vitess.io/contributing/build-from-source for install instructions."
+goversion_min 1.21.0 || echo "Go version reported: `go version`. Version 1.21.0+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
mkdir -p dist
mkdir -p bin
@@ -44,3 +44,8 @@ ln -sf "$PWD/misc/git/pre-commit" .git/hooks/pre-commit
ln -sf "$PWD/misc/git/commit-msg" .git/hooks/commit-msg
git config core.hooksPath .git/hooks
export EXTRA_BIN=$PWD/test/bin
+
+# support private github.com/slackhq/vitess-addons repo
+if [[ -n "${GH_ACCESS_TOKEN}" ]]; then
+ git config --global url.https://${GH_ACCESS_TOKEN}@github.com/.insteadOf https://github.com/
+fi
diff --git a/doc/releasenotes/10_0_0_release_notes.md b/changelog/10.0/10.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_0_release_notes.md
rename to changelog/10.0/10.0.0/release_notes.md
diff --git a/doc/releasenotes/10_0_1_release_notes.md b/changelog/10.0/10.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_1_release_notes.md
rename to changelog/10.0/10.0.1/release_notes.md
diff --git a/doc/releasenotes/10_0_2_release_notes.md b/changelog/10.0/10.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_2_release_notes.md
rename to changelog/10.0/10.0.2/release_notes.md
diff --git a/doc/releasenotes/10_0_3_release_notes.md b/changelog/10.0/10.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_3_release_notes.md
rename to changelog/10.0/10.0.3/release_notes.md
diff --git a/doc/releasenotes/10_0_3_summary.md b/changelog/10.0/10.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/10_0_3_summary.md
rename to changelog/10.0/10.0.3/summary.md
diff --git a/doc/releasenotes/10_0_4_release_notes.md b/changelog/10.0/10.0.4/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_4_release_notes.md
rename to changelog/10.0/10.0.4/release_notes.md
diff --git a/doc/releasenotes/10_0_4_summary.md b/changelog/10.0/10.0.4/summary.md
similarity index 100%
rename from doc/releasenotes/10_0_4_summary.md
rename to changelog/10.0/10.0.4/summary.md
diff --git a/doc/releasenotes/10_0_5_release_notes.md b/changelog/10.0/10.0.5/release_notes.md
similarity index 100%
rename from doc/releasenotes/10_0_5_release_notes.md
rename to changelog/10.0/10.0.5/release_notes.md
diff --git a/doc/releasenotes/10_0_5_summary.md b/changelog/10.0/10.0.5/summary.md
similarity index 100%
rename from doc/releasenotes/10_0_5_summary.md
rename to changelog/10.0/10.0.5/summary.md
diff --git a/changelog/10.0/README.md b/changelog/10.0/README.md
new file mode 100644
index 00000000000..304cc933a16
--- /dev/null
+++ b/changelog/10.0/README.md
@@ -0,0 +1,18 @@
+## v10.0
+* **[10.0.5](10.0.5)**
+ * [Release Notes](10.0.5/release_notes.md)
+
+* **[10.0.4](10.0.4)**
+ * [Release Notes](10.0.4/release_notes.md)
+
+* **[10.0.3](10.0.3)**
+ * [Release Notes](10.0.3/release_notes.md)
+
+* **[10.0.2](10.0.2)**
+ * [Release Notes](10.0.2/release_notes.md)
+
+* **[10.0.1](10.0.1)**
+ * [Release Notes](10.0.1/release_notes.md)
+
+* **[10.0.0](10.0.0)**
+ * [Release Notes](10.0.0/release_notes.md)
diff --git a/doc/releasenotes/11_0_0_release_notes.md b/changelog/11.0/11.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_0_release_notes.md
rename to changelog/11.0/11.0.0/release_notes.md
diff --git a/doc/releasenotes/11_0_1_release_notes.md b/changelog/11.0/11.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_1_release_notes.md
rename to changelog/11.0/11.0.1/release_notes.md
diff --git a/doc/releasenotes/11_0_2_release_notes.md b/changelog/11.0/11.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_2_release_notes.md
rename to changelog/11.0/11.0.2/release_notes.md
diff --git a/doc/releasenotes/11_0_2_summary.md b/changelog/11.0/11.0.2/summary.md
similarity index 100%
rename from doc/releasenotes/11_0_2_summary.md
rename to changelog/11.0/11.0.2/summary.md
diff --git a/doc/releasenotes/11_0_3_release_notes.md b/changelog/11.0/11.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_3_release_notes.md
rename to changelog/11.0/11.0.3/release_notes.md
diff --git a/doc/releasenotes/11_0_3_summary.md b/changelog/11.0/11.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/11_0_3_summary.md
rename to changelog/11.0/11.0.3/summary.md
diff --git a/doc/releasenotes/11_0_4_release_notes.md b/changelog/11.0/11.0.4/release_notes.md
similarity index 100%
rename from doc/releasenotes/11_0_4_release_notes.md
rename to changelog/11.0/11.0.4/release_notes.md
diff --git a/doc/releasenotes/11_0_4_summary.md b/changelog/11.0/11.0.4/summary.md
similarity index 100%
rename from doc/releasenotes/11_0_4_summary.md
rename to changelog/11.0/11.0.4/summary.md
diff --git a/changelog/11.0/README.md b/changelog/11.0/README.md
new file mode 100644
index 00000000000..51dfb2e5648
--- /dev/null
+++ b/changelog/11.0/README.md
@@ -0,0 +1,15 @@
+## v11.0
+* **[11.0.4](11.0.4)**
+ * [Release Notes](11.0.4/release_notes.md)
+
+* **[11.0.3](11.0.3)**
+ * [Release Notes](11.0.3/release_notes.md)
+
+* **[11.0.2](11.0.2)**
+ * [Release Notes](11.0.2/release_notes.md)
+
+* **[11.0.1](11.0.1)**
+ * [Release Notes](11.0.1/release_notes.md)
+
+* **[11.0.0](11.0.0)**
+ * [Release Notes](11.0.0/release_notes.md)
diff --git a/doc/releasenotes/12_0_0_release_notes.md b/changelog/12.0/12.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_0_release_notes.md
rename to changelog/12.0/12.0.0/release_notes.md
diff --git a/doc/releasenotes/12_0_0_summary.md b/changelog/12.0/12.0.0/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_0_summary.md
rename to changelog/12.0/12.0.0/summary.md
diff --git a/doc/releasenotes/12_0_1_release_notes.md b/changelog/12.0/12.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_1_release_notes.md
rename to changelog/12.0/12.0.1/release_notes.md
diff --git a/doc/releasenotes/12_0_1_summary.md b/changelog/12.0/12.0.1/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_1_summary.md
rename to changelog/12.0/12.0.1/summary.md
diff --git a/doc/releasenotes/12_0_2_release_notes.md b/changelog/12.0/12.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_2_release_notes.md
rename to changelog/12.0/12.0.2/release_notes.md
diff --git a/doc/releasenotes/12_0_2_summary.md b/changelog/12.0/12.0.2/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_2_summary.md
rename to changelog/12.0/12.0.2/summary.md
diff --git a/doc/releasenotes/12_0_3_release_notes.md b/changelog/12.0/12.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_3_release_notes.md
rename to changelog/12.0/12.0.3/release_notes.md
diff --git a/doc/releasenotes/12_0_3_summary.md b/changelog/12.0/12.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_3_summary.md
rename to changelog/12.0/12.0.3/summary.md
diff --git a/doc/releasenotes/12_0_4_release_notes.md b/changelog/12.0/12.0.4/release_notes.md
similarity index 100%
rename from doc/releasenotes/12_0_4_release_notes.md
rename to changelog/12.0/12.0.4/release_notes.md
diff --git a/doc/releasenotes/12_0_5_changelog.md b/changelog/12.0/12.0.5/changelog.md
similarity index 100%
rename from doc/releasenotes/12_0_5_changelog.md
rename to changelog/12.0/12.0.5/changelog.md
diff --git a/doc/releasenotes/12_0_5_release_notes.md b/changelog/12.0/12.0.5/release_notes.md
similarity index 92%
rename from doc/releasenotes/12_0_5_release_notes.md
rename to changelog/12.0/12.0.5/release_notes.md
index fc2c613e4da..dbff8a5aade 100644
--- a/doc/releasenotes/12_0_5_release_notes.md
+++ b/changelog/12.0/12.0.5/release_notes.md
@@ -9,7 +9,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d
> go1.17.12 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the runtime, and the runtime/metrics package. [See the Go 1.17.12 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.17.12+label%3ACherryPickApproved) on our issue tracker for details.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_5_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.5/changelog.md).
The release includes 7 commits (excluding merges)
diff --git a/doc/releasenotes/12_0_6_changelog.md b/changelog/12.0/12.0.6/changelog.md
similarity index 100%
rename from doc/releasenotes/12_0_6_changelog.md
rename to changelog/12.0/12.0.6/changelog.md
diff --git a/doc/releasenotes/12_0_6_release_notes.md b/changelog/12.0/12.0.6/release_notes.md
similarity index 93%
rename from doc/releasenotes/12_0_6_release_notes.md
rename to changelog/12.0/12.0.6/release_notes.md
index 8afbe0a4239..c9c743d95ea 100644
--- a/doc/releasenotes/12_0_6_release_notes.md
+++ b/changelog/12.0/12.0.6/release_notes.md
@@ -15,7 +15,7 @@ This change is documented on our website [here](https://vitess.io/docs/12.0/over
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_6_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.6/changelog.md).
The release includes 11 commits (excluding merges)
diff --git a/doc/releasenotes/12_0_6_summary.md b/changelog/12.0/12.0.6/summary.md
similarity index 100%
rename from doc/releasenotes/12_0_6_summary.md
rename to changelog/12.0/12.0.6/summary.md
diff --git a/changelog/12.0/README.md b/changelog/12.0/README.md
new file mode 100644
index 00000000000..131b2df443d
--- /dev/null
+++ b/changelog/12.0/README.md
@@ -0,0 +1,23 @@
+## v12.0
+* **[12.0.6](12.0.6)**
+ * [Changelog](12.0.6/changelog.md)
+ * [Release Notes](12.0.6/release_notes.md)
+
+* **[12.0.5](12.0.5)**
+ * [Changelog](12.0.5/changelog.md)
+ * [Release Notes](12.0.5/release_notes.md)
+
+* **[12.0.4](12.0.4)**
+ * [Release Notes](12.0.4/release_notes.md)
+
+* **[12.0.3](12.0.3)**
+ * [Release Notes](12.0.3/release_notes.md)
+
+* **[12.0.2](12.0.2)**
+ * [Release Notes](12.0.2/release_notes.md)
+
+* **[12.0.1](12.0.1)**
+ * [Release Notes](12.0.1/release_notes.md)
+
+* **[12.0.0](12.0.0)**
+ * [Release Notes](12.0.0/release_notes.md)
diff --git a/doc/releasenotes/13_0_0_release_notes.md b/changelog/13.0/13.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/13_0_0_release_notes.md
rename to changelog/13.0/13.0.0/release_notes.md
diff --git a/doc/releasenotes/13_0_0_summary.md b/changelog/13.0/13.0.0/summary.md
similarity index 100%
rename from doc/releasenotes/13_0_0_summary.md
rename to changelog/13.0/13.0.0/summary.md
diff --git a/doc/releasenotes/13_0_1_release_notes.md b/changelog/13.0/13.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/13_0_1_release_notes.md
rename to changelog/13.0/13.0.1/release_notes.md
diff --git a/doc/releasenotes/13_0_2_changelog.md b/changelog/13.0/13.0.2/changelog.md
similarity index 100%
rename from doc/releasenotes/13_0_2_changelog.md
rename to changelog/13.0/13.0.2/changelog.md
diff --git a/doc/releasenotes/13_0_2_release_notes.md b/changelog/13.0/13.0.2/release_notes.md
similarity index 93%
rename from doc/releasenotes/13_0_2_release_notes.md
rename to changelog/13.0/13.0.2/release_notes.md
index 310eb5e633a..12692031e2a 100644
--- a/doc/releasenotes/13_0_2_release_notes.md
+++ b/changelog/13.0/13.0.2/release_notes.md
@@ -9,7 +9,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d
> go1.17.12 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the runtime, and the runtime/metrics package. [See the Go 1.17.12 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.17.12+label%3ACherryPickApproved) on our issue tracker for details.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/13_0_2_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/13.0/13.0.2/changelog.md).
The release includes 28 commits (excluding merges)
Thanks to all our contributors: @GuptaManan100, @aquarapid, @frouioui, @harshit-gangal, @mattlord, @rohit-nayak-ps, @systay, @vitess-bot[bot], @vmg
\ No newline at end of file
diff --git a/doc/releasenotes/13_0_2_summary.md b/changelog/13.0/13.0.2/summary.md
similarity index 100%
rename from doc/releasenotes/13_0_2_summary.md
rename to changelog/13.0/13.0.2/summary.md
diff --git a/doc/releasenotes/13_0_3_changelog.md b/changelog/13.0/13.0.3/changelog.md
similarity index 100%
rename from doc/releasenotes/13_0_3_changelog.md
rename to changelog/13.0/13.0.3/changelog.md
diff --git a/doc/releasenotes/13_0_3_release_notes.md b/changelog/13.0/13.0.3/release_notes.md
similarity index 93%
rename from doc/releasenotes/13_0_3_release_notes.md
rename to changelog/13.0/13.0.3/release_notes.md
index 3fee980f099..b04c0d69d20 100644
--- a/doc/releasenotes/13_0_3_release_notes.md
+++ b/changelog/13.0/13.0.3/release_notes.md
@@ -15,7 +15,7 @@ This change is documented on our website [here](https://vitess.io/docs/13.0/over
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/13_0_3_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/13.0/13.0.3/changelog.md).
The release includes 17 commits(excluding merges)
diff --git a/doc/releasenotes/13_0_3_summary.md b/changelog/13.0/13.0.3/summary.md
similarity index 100%
rename from doc/releasenotes/13_0_3_summary.md
rename to changelog/13.0/13.0.3/summary.md
diff --git a/changelog/13.0/README.md b/changelog/13.0/README.md
new file mode 100644
index 00000000000..780625ef69a
--- /dev/null
+++ b/changelog/13.0/README.md
@@ -0,0 +1,14 @@
+## v13.0
+* **[13.0.3](13.0.3)**
+ * [Changelog](13.0.3/changelog.md)
+ * [Release Notes](13.0.3/release_notes.md)
+
+* **[13.0.2](13.0.2)**
+ * [Changelog](13.0.2/changelog.md)
+ * [Release Notes](13.0.2/release_notes.md)
+
+* **[13.0.1](13.0.1)**
+ * [Release Notes](13.0.1/release_notes.md)
+
+* **[13.0.0](13.0.0)**
+ * [Release Notes](13.0.0/release_notes.md)
diff --git a/doc/releasenotes/14_0_0_changelog.md b/changelog/14.0/14.0.0/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_0_changelog.md
rename to changelog/14.0/14.0.0/changelog.md
diff --git a/doc/releasenotes/14_0_0_release_notes.md b/changelog/14.0/14.0.0/release_notes.md
similarity index 98%
rename from doc/releasenotes/14_0_0_release_notes.md
rename to changelog/14.0/14.0.0/release_notes.md
index 50d02232ae1..5f88f6975db 100644
--- a/doc/releasenotes/14_0_0_release_notes.md
+++ b/changelog/14.0/14.0.0/release_notes.md
@@ -18,6 +18,7 @@
## Known Issues
- [VTOrc doesn't discover the tablets](https://github.com/vitessio/vitess/issues/10650) of a keyspace if the durability policy doesn't exist in the topo server when it comes up. This can be resolved by restarting VTOrc.
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
## Major Changes
@@ -318,7 +319,7 @@ Work has gone into making the advisory locks (`get_lock()`, `release_lock()`, et
A long time ago, the sharding column and type were specified at the keyspace level. This syntax is now deprecated and will be removed in v15.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_0_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.0/changelog.md).
The release includes 1101 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_0_summary.md b/changelog/14.0/14.0.0/summary.md
similarity index 98%
rename from doc/releasenotes/14_0_0_summary.md
rename to changelog/14.0/14.0.0/summary.md
index e02c9d9a282..6047ec7ec16 100644
--- a/doc/releasenotes/14_0_0_summary.md
+++ b/changelog/14.0/14.0.0/summary.md
@@ -17,6 +17,7 @@
## Known Issues
- [VTOrc doesn't discover the tablets](https://github.com/vitessio/vitess/issues/10650) of a keyspace if the durability policy doesn't exist in the topo server when it comes up. This can be resolved by restarting VTOrc.
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
## Major Changes
diff --git a/doc/releasenotes/14_0_1_changelog.md b/changelog/14.0/14.0.1/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_1_changelog.md
rename to changelog/14.0/14.0.1/changelog.md
diff --git a/doc/releasenotes/14_0_1_release_notes.md b/changelog/14.0/14.0.1/release_notes.md
similarity index 80%
rename from doc/releasenotes/14_0_1_release_notes.md
rename to changelog/14.0/14.0.1/release_notes.md
index 7215301939f..639af4fce96 100644
--- a/doc/releasenotes/14_0_1_release_notes.md
+++ b/changelog/14.0/14.0.1/release_notes.md
@@ -1,4 +1,8 @@
# Release of Vitess v14.0.1
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.4`
@@ -9,7 +13,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d
> go1.18.4 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the linker, the runtime, and the runtime/metrics package. [See the Go 1.18.4 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.18.4+label%3ACherryPickApproved) on our issue tracker for details.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_1_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.1/changelog.md).
The release includes 25 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_1_summary.md b/changelog/14.0/14.0.1/summary.md
similarity index 80%
rename from doc/releasenotes/14_0_1_summary.md
rename to changelog/14.0/14.0.1/summary.md
index 1f2f985baba..5a176b6af16 100644
--- a/doc/releasenotes/14_0_1_summary.md
+++ b/changelog/14.0/14.0.1/summary.md
@@ -1,3 +1,7 @@
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.4`
diff --git a/doc/releasenotes/14_0_2_changelog.md b/changelog/14.0/14.0.2/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_2_changelog.md
rename to changelog/14.0/14.0.2/changelog.md
diff --git a/doc/releasenotes/14_0_2_release_notes.md b/changelog/14.0/14.0.2/release_notes.md
similarity index 83%
rename from doc/releasenotes/14_0_2_release_notes.md
rename to changelog/14.0/14.0.2/release_notes.md
index 956ca21ef62..724673af576 100644
--- a/doc/releasenotes/14_0_2_release_notes.md
+++ b/changelog/14.0/14.0.2/release_notes.md
@@ -1,4 +1,8 @@
# Release of Vitess v14.0.2
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.5`
@@ -14,7 +18,7 @@ Since the end-of-life of MariaDB 10.2, its Docker image is unavailable, and we d
You can find more information on the list of supported databases on our documentation website, [here](https://vitess.io/docs/14.0/overview/supported-databases/).
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_2_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.2/changelog.md).
The release includes 23 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_2_summary.md b/changelog/14.0/14.0.2/summary.md
similarity index 85%
rename from doc/releasenotes/14_0_2_summary.md
rename to changelog/14.0/14.0.2/summary.md
index 05a1aac5d68..8b26887dd01 100644
--- a/doc/releasenotes/14_0_2_summary.md
+++ b/changelog/14.0/14.0.2/summary.md
@@ -1,3 +1,7 @@
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Upgrade to `go1.18.5`
diff --git a/doc/releasenotes/14_0_3_changelog.md b/changelog/14.0/14.0.3/changelog.md
similarity index 100%
rename from doc/releasenotes/14_0_3_changelog.md
rename to changelog/14.0/14.0.3/changelog.md
diff --git a/doc/releasenotes/14_0_3_release_notes.md b/changelog/14.0/14.0.3/release_notes.md
similarity index 77%
rename from doc/releasenotes/14_0_3_release_notes.md
rename to changelog/14.0/14.0.3/release_notes.md
index d9cd4ac7a61..5d5cc9b871c 100644
--- a/doc/releasenotes/14_0_3_release_notes.md
+++ b/changelog/14.0/14.0.3/release_notes.md
@@ -1,4 +1,8 @@
# Release of Vitess v14.0.3
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Fix VTOrc Discovery
@@ -8,7 +12,7 @@ This problem could be resolved by restarting the VTOrc so that it discovers all
frequently, this posed a greater challenge, since some pods when evicted and rescheduled on a different node, would sometimes fail to be discovered by VTOrc.
This has problem has been addressed in this patch by the fix https://github.com/vitessio/vitess/pull/10662.
------------
-The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_3_changelog.md).
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.3/changelog.md).
The release includes 12 commits (excluding merges)
diff --git a/doc/releasenotes/14_0_3_summary.md b/changelog/14.0/14.0.3/summary.md
similarity index 77%
rename from doc/releasenotes/14_0_3_summary.md
rename to changelog/14.0/14.0.3/summary.md
index 9d9364f67b3..121d9054a19 100644
--- a/doc/releasenotes/14_0_3_summary.md
+++ b/changelog/14.0/14.0.3/summary.md
@@ -1,3 +1,7 @@
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
## Major Changes
### Fix VTOrc Discovery
diff --git a/changelog/14.0/README.md b/changelog/14.0/README.md
new file mode 100644
index 00000000000..0aaddf7abb6
--- /dev/null
+++ b/changelog/14.0/README.md
@@ -0,0 +1,16 @@
+## v14.0
+* **[14.0.3](14.0.3)**
+ * [Changelog](14.0.3/changelog.md)
+ * [Release Notes](14.0.3/release_notes.md)
+
+* **[14.0.2](14.0.2)**
+ * [Changelog](14.0.2/changelog.md)
+ * [Release Notes](14.0.2/release_notes.md)
+
+* **[14.0.1](14.0.1)**
+ * [Changelog](14.0.1/changelog.md)
+ * [Release Notes](14.0.1/release_notes.md)
+
+* **[14.0.0](14.0.0)**
+ * [Changelog](14.0.0/changelog.md)
+ * [Release Notes](14.0.0/release_notes.md)
diff --git a/changelog/15.0/15.0.0/changelog.md b/changelog/15.0/15.0.0/changelog.md
new file mode 100644
index 00000000000..88da2b594bf
--- /dev/null
+++ b/changelog/15.0/15.0.0/changelog.md
@@ -0,0 +1,616 @@
+# Changelog of Vitess v15.0.0
+
+### Announcement
+#### Query Serving
+ * Remove tablet query plan field caching [#10489](https://github.com/vitessio/vitess/pull/10489)
+ * delete unused flags and handling of deprecated protobuf fields [#10612](https://github.com/vitessio/vitess/pull/10612)
+### Bug fixes
+#### Backup and Restore
+ * Remove built-in decompression flag [#10670](https://github.com/vitessio/vitess/pull/10670)
+ * Fixing logic for backup progress [#10794](https://github.com/vitessio/vitess/pull/10794)
+ * Backups: Support InnoDB Redo Log Location With 8.0.30+ [#10847](https://github.com/vitessio/vitess/pull/10847)
+ * fix: objname was not logged correctly [#11038](https://github.com/vitessio/vitess/pull/11038)
+ * [release-15.0] Detect redo log location dynamically based on presence (#11555) [#11558](https://github.com/vitessio/vitess/pull/11558)
+#### Build/CI
+ * Fixed the release notes CI check helper [#10574](https://github.com/vitessio/vitess/pull/10574)
+ * Remove potential double close of channel [#10929](https://github.com/vitessio/vitess/pull/10929)
+ * Add explicit close state to memory topo connection [#11110](https://github.com/vitessio/vitess/pull/11110)
+ * Use Ubuntu 20 for vtgate and tabletmanager workflows [#11152](https://github.com/vitessio/vitess/pull/11152)
+ * Fix vtcompose and docker-compose examples [#11188](https://github.com/vitessio/vitess/pull/11188)
+ * Fix the script `check_make_sizegen` [#11465](https://github.com/vitessio/vitess/pull/11465)
+ * Skip `TestComparisonSemantics` test [#11474](https://github.com/vitessio/vitess/pull/11474)
+#### CLI
+ * [vtadmin] Update how tracing flags are registered [#11063](https://github.com/vitessio/vitess/pull/11063)
+ * CLI Pflag migration work: Fix regression caused by pflag-vreplication PR [#11127](https://github.com/vitessio/vitess/pull/11127)
+#### Cluster management
+ * Fix Online DDL Revert flakiness [#10675](https://github.com/vitessio/vitess/pull/10675)
+ * Fix pprof toggling via SIGUSR1 and waitSig flag [#10875](https://github.com/vitessio/vitess/pull/10875)
+ * BugFix: Vttablet semi-sync settings on startup [#10881](https://github.com/vitessio/vitess/pull/10881)
+ * BugFix: vtctld panic with `enable_realtime_stats` [#10902](https://github.com/vitessio/vitess/pull/10902)
+ * Fix races in memory topo and watcher [#11065](https://github.com/vitessio/vitess/pull/11065)
+ * [vtctld] Fix nil-ness in healthcheck [#11067](https://github.com/vitessio/vitess/pull/11067)
+ * Install panic handler for all grpcvtctldserver endpoints [#11184](https://github.com/vitessio/vitess/pull/11184)
+ * Fix tablet debug/env template variable name [#11348](https://github.com/vitessio/vitess/pull/11348)
+#### Evalengine
+ * evalengine: support mismatched numerical types [#10997](https://github.com/vitessio/vitess/pull/10997)
+#### Examples
+ * Fix local example scripts [#11319](https://github.com/vitessio/vitess/pull/11319)
+#### General
+ * RateLimiter: exit goroutine at Stop() [#10755](https://github.com/vitessio/vitess/pull/10755)
+ * Fix frouioui in the MAINTAINERS.md file [#11343](https://github.com/vitessio/vitess/pull/11343)
+ * Stats Flags: include stats flags in the correct binaries [#11450](https://github.com/vitessio/vitess/pull/11450)
+ * Test flags: Update logic for parsing test flags to run unit tests within GoLand and to parse test flags in vtgate to allow running unit tests [#11551](https://github.com/vitessio/vitess/pull/11551)
+#### Operator
+ * Increase the memory limit of the vitess-operator [#11548](https://github.com/vitessio/vitess/pull/11548)
+#### Query Serving
+ * fix: scalar aggregation engine primitive [#10465](https://github.com/vitessio/vitess/pull/10465)
+ * fix: aggregation empty row on join with grouping and aggregations [#10480](https://github.com/vitessio/vitess/pull/10480)
+ * Fix parsing of CAST() statements [#10512](https://github.com/vitessio/vitess/pull/10512)
+ * Add back unary single column expression check [#10514](https://github.com/vitessio/vitess/pull/10514)
+ * fix: handle planner_version and planner-version correctly [#10534](https://github.com/vitessio/vitess/pull/10534)
+ * Fix casing of vitess migration syntax and comments printing [#10535](https://github.com/vitessio/vitess/pull/10535)
+ * Fix vtgate query log table name extraction for DML statements [#10536](https://github.com/vitessio/vitess/pull/10536)
+ * VReplication: more unrecoverable error codes [#10559](https://github.com/vitessio/vitess/pull/10559)
+ * Add support for INSERT() string function [#10593](https://github.com/vitessio/vitess/pull/10593)
+ * Ignoring empty queries with MySQL dashed comments [#10634](https://github.com/vitessio/vitess/pull/10634)
+ * Online DDL: deal with autogenerated CHECK constraint names [#10638](https://github.com/vitessio/vitess/pull/10638)
+ * Inject shard name in commit-phase multi-shard errors [#10669](https://github.com/vitessio/vitess/pull/10669)
+ * Parameterize BIT types and fixes in HEX types [#10689](https://github.com/vitessio/vitess/pull/10689)
+ * BugFix: Keep predicates in join when pushing new ones [#10715](https://github.com/vitessio/vitess/pull/10715)
+ * BugFix: Gen4CompareV3 planner reverted to Gen4 on Update queries [#10722](https://github.com/vitessio/vitess/pull/10722)
+ * Fix gen4 planner handling of `<=>` operator with `NULL` operand [#10754](https://github.com/vitessio/vitess/pull/10754)
+ * BugFix: `--queryserver-config-terse-errors` shouldn't redact `Row count exceeded` error and shouldn't affect ignoring `Duplicate entry` error in lookup vindexes [#10762](https://github.com/vitessio/vitess/pull/10762)
+ * Wrong length creating the buffer needed to ask for full auth [#10767](https://github.com/vitessio/vitess/pull/10767)
+ * fix: evalengine - check compare numeric of same type [#10793](https://github.com/vitessio/vitess/pull/10793)
+ * Fix client session state tracking logic [#10871](https://github.com/vitessio/vitess/pull/10871)
+ * feat: don't use the vtgatehandler unless it is known to have been initialized [#10879](https://github.com/vitessio/vitess/pull/10879)
+ * schemadiff: ordering and applying a RenameColumn [#10912](https://github.com/vitessio/vitess/pull/10912)
+ * Fix parsing of `PARTITION BY KEY` [#10958](https://github.com/vitessio/vitess/pull/10958)
+ * Online DDL, CancelMigration: distinguish user-issued vs. internally-issued cancellation [#11011](https://github.com/vitessio/vitess/pull/11011)
+ * Use the correct error type for dependencies [#11018](https://github.com/vitessio/vitess/pull/11018)
+ * Fix AST copying of basic types [#11046](https://github.com/vitessio/vitess/pull/11046)
+ * fix: return when instructions are nil in checkThatPlanIsValid [#11070](https://github.com/vitessio/vitess/pull/11070)
+ * Fix handling zero byte string for session state changed [#11071](https://github.com/vitessio/vitess/pull/11071)
+ * Fix logging formatting mistake [#11086](https://github.com/vitessio/vitess/pull/11086)
+ * On demand heartbeats: fix race condition closing the writer [#11157](https://github.com/vitessio/vitess/pull/11157)
+ * Fix problematic watch cancellation due to context cancellation [#11170](https://github.com/vitessio/vitess/pull/11170)
+ * Fix OK packet parsing logic [#11176](https://github.com/vitessio/vitess/pull/11176)
+ * Fix: Insert using select streaming bug [#11248](https://github.com/vitessio/vitess/pull/11248)
+ * bugfix: Truncate columns even when sorting on vtgate [#11265](https://github.com/vitessio/vitess/pull/11265)
+ * Fix query list override issue on mysql restart [#11309](https://github.com/vitessio/vitess/pull/11309)
+ * Fix conditions after `<=>` operator on left joined table columns being ignored for routing purposes. [#11310](https://github.com/vitessio/vitess/pull/11310)
+ * Fix complex predicates being pulled into `ON` conditions for `LEFT JOIN` statements. [#11317](https://github.com/vitessio/vitess/pull/11317)
+ * Handle cache value type correctly [#11369](https://github.com/vitessio/vitess/pull/11369)
+ * Push down derived tables under route when possible [#11379](https://github.com/vitessio/vitess/pull/11379)
+ * Fix: DML engine multiequal support [#11395](https://github.com/vitessio/vitess/pull/11395)
+ * Allow parenthesis around derived tables [#11407](https://github.com/vitessio/vitess/pull/11407)
+ * fix: do not rewrite single columns in derived tables [#11419](https://github.com/vitessio/vitess/pull/11419)
+ * Plan order by `COUNT(X)` [#11420](https://github.com/vitessio/vitess/pull/11420)
+ * Fix #11455 - skip vindex operations for `DELETE` statements against unsharded tables [#11461](https://github.com/vitessio/vitess/pull/11461)
+ * bug fix: using self-referencing columns in HAVING should not overflow [#11499](https://github.com/vitessio/vitess/pull/11499)
+ * [release-15.0] fix: reserve connection to follow query timeout when outside of transaction (#11490) [#11505](https://github.com/vitessio/vitess/pull/11505)
+ * [15.0] Fix query list override issue on mysql restart (#11309) [#11506](https://github.com/vitessio/vitess/pull/11506)
+ * Fix `HAVING` rewriting made in #11306 [#11515](https://github.com/vitessio/vitess/pull/11515)
+ * [15.0] fix: fail over reconnect in stream execution for connection with transaction [#11527](https://github.com/vitessio/vitess/pull/11527)
+ * [15.0] Fix: concatenate engine in transaction [#11534](https://github.com/vitessio/vitess/pull/11534)
+ * Redact bind variables in mysql errors [#11540](https://github.com/vitessio/vitess/pull/11540)
+#### TabletManager
+ * Fix schema engine close and ticks race [#10386](https://github.com/vitessio/vitess/pull/10386)
+#### VReplication
+ * VStream API: Fix vtgate memory leaks when context gets cancelled [#10571](https://github.com/vitessio/vitess/pull/10571)
+ * VReplication: retry in WaitForPos when read of pos is killed off by deadlock detector [#10621](https://github.com/vitessio/vitess/pull/10621)
+ * MoveTables: use source timezone to adjust datetime columns on update statements [#10667](https://github.com/vitessio/vitess/pull/10667)
+ * VDiff2: ignore errors while attempting to purge vdiff tables [#10725](https://github.com/vitessio/vitess/pull/10725)
+ * Add drop_foreign_keys to v2 MoveTables command [#10773](https://github.com/vitessio/vitess/pull/10773)
+ * Ensure VDiff Engine is open in RPC entrypoint [#10969](https://github.com/vitessio/vitess/pull/10969)
+ * VReplication: vreplication_max_time_to_retry_on_error default to zero, no limit [#11031](https://github.com/vitessio/vitess/pull/11031)
+ * VReplication: Handle DECIMAL 0 Value Edge Case [#11212](https://github.com/vitessio/vitess/pull/11212)
+ * Don't run VDiff on frozen workflows [#11234](https://github.com/vitessio/vitess/pull/11234)
+ * VStreamer: fix deadlock when there are a lot of vschema changes at the same time as binlog events [#11325](https://github.com/vitessio/vitess/pull/11325)
+ * VDiff: Make restarting VReplication workflow more robust [#11413](https://github.com/vitessio/vitess/pull/11413)
+#### VTCombo
+ * vtcombo mutates options, make a copy to avoid this [#11223](https://github.com/vitessio/vitess/pull/11223)
+#### VTorc
+ * Fix panic in VTOrc [#10519](https://github.com/vitessio/vitess/pull/10519)
+ * Fix VTOrc Discovery to also retry discovering tablets which aren't present in database_instance table [#10662](https://github.com/vitessio/vitess/pull/10662)
+ * BugFix: VTOrc should repair replication if either replication thread is stopped [#10786](https://github.com/vitessio/vitess/pull/10786)
+ * Fix VTOrc holding locks after shutdown [#11442](https://github.com/vitessio/vitess/pull/11442)
+ * [15.0] Fix VTOrc to handle multiple failures [#11489](https://github.com/vitessio/vitess/pull/11489)
+#### vttestserver
+ * Fix flags in vttestserver run script used in the docker image [#11354](https://github.com/vitessio/vitess/pull/11354)
+### CI/Build
+#### Backup and Restore
+ * Revert: Revert temporary workflow changes made in #10847 [#10914](https://github.com/vitessio/vitess/pull/10914)
+#### Build/CI
+ * tablegc test to utilize new capability logic [#10463](https://github.com/vitessio/vitess/pull/10463)
+ * docs: add query serving features to the release notes [#10475](https://github.com/vitessio/vitess/pull/10475)
+ * Modified the Pull Request review checklist to check for descriptive Pull Request titles [#10485](https://github.com/vitessio/vitess/pull/10485)
+ * Take into account `github.ref` when doing upgrade-downgrade tests [#10504](https://github.com/vitessio/vitess/pull/10504)
+ * add vtadmin web files to all lite images [#10581](https://github.com/vitessio/vitess/pull/10581)
+ * Removed the check label in upgrade downgrade tests [#10583](https://github.com/vitessio/vitess/pull/10583)
+ * Add stale PRs action [#10603](https://github.com/vitessio/vitess/pull/10603)
+ * Allow manual workflow_dispatch for close_stale_pull_requests [#10610](https://github.com/vitessio/vitess/pull/10610)
+ * Enable stale PR closer [#10617](https://github.com/vitessio/vitess/pull/10617)
+ * fix: build [#10647](https://github.com/vitessio/vitess/pull/10647)
+ * Remove the review checklist workflow [#10656](https://github.com/vitessio/vitess/pull/10656)
+ * Add MySQL 8 Support to Backup Tests [#10691](https://github.com/vitessio/vitess/pull/10691)
+ * Remove MariaDB 10.2 Unit Test in v15 [#10700](https://github.com/vitessio/vitess/pull/10700)
+ * Auto Detect MySQL Version and Use in vtgate mysql_server_version Flag [#10701](https://github.com/vitessio/vitess/pull/10701)
+ * Reduce Flakiness of ERS/PRS e2e Tests Using Retries With a Timeout [#10720](https://github.com/vitessio/vitess/pull/10720)
+ * Flakes: Increase timeouts for upgrade_downgrade workflows [#10735](https://github.com/vitessio/vitess/pull/10735)
+ * fix: use go-unit-report fork version in ci workflow [#10757](https://github.com/vitessio/vitess/pull/10757)
+ * Add the linter for exporting a loop variable through a pointer reference [#10763](https://github.com/vitessio/vitess/pull/10763)
+ * Be explicit about capturing the pointer [#10765](https://github.com/vitessio/vitess/pull/10765)
+ * looking into onlineddl_vrepl_stress_suite flakiness in CI [#10779](https://github.com/vitessio/vitess/pull/10779)
+ * Add semgrep CI workflow [#10826](https://github.com/vitessio/vitess/pull/10826)
+ * onlineddl_vrepl flakiness: further work [#10876](https://github.com/vitessio/vitess/pull/10876)
+ * Revert temporary workflow changes made in #10847 [#10896](https://github.com/vitessio/vitess/pull/10896)
+ * Fix main in CI [#10953](https://github.com/vitessio/vitess/pull/10953)
+ * Avoid race condition in memory topo watch shutdown [#10954](https://github.com/vitessio/vitess/pull/10954)
+ * Remove accidentally added fmt.Printf from debugging [#10967](https://github.com/vitessio/vitess/pull/10967)
+ * Add more robust go version handling [#11001](https://github.com/vitessio/vitess/pull/11001)
+ * Run latest gofmt on everything & address linter warnings [#11008](https://github.com/vitessio/vitess/pull/11008)
+ * Fix mariadb103 ci [#11015](https://github.com/vitessio/vitess/pull/11015)
+ * Online DDL vrepl suite: fix auto_increment tests in 8.0 [#11019](https://github.com/vitessio/vitess/pull/11019)
+ * CI: change upgrade/downgrade tests to use vitessio fork of go-junit-report [#11023](https://github.com/vitessio/vitess/pull/11023)
+ * Add workflow file to the filter rules [#11032](https://github.com/vitessio/vitess/pull/11032)
+ * Add upgrade-downgrade tests for next releases [#11033](https://github.com/vitessio/vitess/pull/11033)
+ * fix missing vtadmin binary in docker image [#11076](https://github.com/vitessio/vitess/pull/11076)
+ * Refactor vtorc tests to run as a single test with sub-tests [#11108](https://github.com/vitessio/vitess/pull/11108)
+ * Upgrade to Ubuntu 20.04 for endtoend tests [#11113](https://github.com/vitessio/vitess/pull/11113)
+ * Move tabletmanager CI jobs to 20.04 [#11116](https://github.com/vitessio/vitess/pull/11116)
+ * Upgrade vtgate CI jobs to 20.04 [#11118](https://github.com/vitessio/vitess/pull/11118)
+ * Upgrade CI for unit tests to Ubuntu 20.04 [#11119](https://github.com/vitessio/vitess/pull/11119)
+ * Move towards MySQL 8.0 as the default template generation [#11153](https://github.com/vitessio/vitess/pull/11153)
+ * Add VTOrc and VTAdmin to Region example [#11172](https://github.com/vitessio/vitess/pull/11172)
+ * Add a CodeQL workflow to check vulnerabilities in the codebase [#11207](https://github.com/vitessio/vitess/pull/11207)
+ * Fix build errors [#11209](https://github.com/vitessio/vitess/pull/11209)
+ * Adding tablet startup check [#11251](https://github.com/vitessio/vitess/pull/11251)
+ * Move vtorc to self-hosted runner [#11255](https://github.com/vitessio/vitess/pull/11255)
+ * Move 12 and 18 back to github runners [#11273](https://github.com/vitessio/vitess/pull/11273)
+ * Flakes: Fix Backup Transform Test Flakiness [#11352](https://github.com/vitessio/vitess/pull/11352)
+ * Move vtorc-8.0 to self-hosted runner [#11384](https://github.com/vitessio/vitess/pull/11384)
+#### Cluster management
+ * Fix examples/compose/docker-compose.yml to run healthy vttablets [#10597](https://github.com/vitessio/vitess/pull/10597)
+ * Include more tests in upgrade tests [#10665](https://github.com/vitessio/vitess/pull/10665)
+ * Fixing flakiness in TestCrossCellDurability and TestHealthCheckCacheWithTabletChurn [#10961](https://github.com/vitessio/vitess/pull/10961)
+ * FlakinessFix: Reparent tests by removing `restore_from_backup` [#11064](https://github.com/vitessio/vitess/pull/11064)
+ * Augment local example to also run VTOrc [#11155](https://github.com/vitessio/vitess/pull/11155)
+#### Documentation
+ * Minor fixes to markdown and test code [#10866](https://github.com/vitessio/vitess/pull/10866)
+#### General
+ * Upgrade to `go1.18.4` [#10705](https://github.com/vitessio/vitess/pull/10705)
+ * Tweak make targets MacOS M1 xc to Linux arches [#10706](https://github.com/vitessio/vitess/pull/10706)
+ * [release-15.0] [deps] go get golang.org/x/text && go mod tidy (#11466) [#11467](https://github.com/vitessio/vitess/pull/11467)
+#### Governance
+ * Update the comment for review checklist with an item for CI workflows [#10471](https://github.com/vitessio/vitess/pull/10471)
+#### Java
+ * Bump gson from 2.8.5 to 2.8.9 in /java [#10353](https://github.com/vitessio/vitess/pull/10353)
+#### Online DDL
+ * Address additional causes of OnlineDDL test flakiness [#11047](https://github.com/vitessio/vitess/pull/11047)
+#### Operator
+ * Fix VTop Example [#10687](https://github.com/vitessio/vitess/pull/10687)
+#### Query Serving
+ * CI Fix: Collation tests [#10839](https://github.com/vitessio/vitess/pull/10839)
+ * Add additional tests for EOF packet checks [#11014](https://github.com/vitessio/vitess/pull/11014)
+#### VReplication
+ * vrepl endtoend flakiness fix via schema versioning [#10804](https://github.com/vitessio/vitess/pull/10804)
+ * Tests: AddShard should use targeted MySQL version [#11006](https://github.com/vitessio/vitess/pull/11006)
+ * Flakes: Use waits instead of checks in vrepl e2e tests [#11048](https://github.com/vitessio/vitess/pull/11048)
+ * Flakes: Prevent VDiff2 test failures when operating near the second boundary [#11054](https://github.com/vitessio/vitess/pull/11054)
+ * Flakes: Eliminate TestVreplicationCopyThrottling Flakes [#11208](https://github.com/vitessio/vitess/pull/11208)
+ * Flakes: Correct TestVReplicationCopyThrottling Logic [#11224](https://github.com/vitessio/vitess/pull/11224)
+#### VTAdmin
+ * fix building logtail, logrotate and vtadmin docker image in Dockerhub [#10968](https://github.com/vitessio/vitess/pull/10968)
+#### VTorc
+ * Flakiness Fix: Tests for GracefulPrimaryTakeover [#11355](https://github.com/vitessio/vitess/pull/11355)
+ * [release-15.0] Move vtorc runners back to normal github runners (#11482) [#11494](https://github.com/vitessio/vitess/pull/11494)
+### Dependabot
+#### Examples
+ * Build(deps): Bump async from 3.2.0 to 3.2.4 in /vitess-mixin/e2e [#10515](https://github.com/vitessio/vitess/pull/10515)
+#### Observability
+ * Bump minimist and cypress in /vitess-mixin/e2e [#11201](https://github.com/vitessio/vitess/pull/11201)
+#### VTAdmin
+ * Bump protobufjs from 6.10.2 to 6.11.3 in /web/vtadmin [#10418](https://github.com/vitessio/vitess/pull/10418)
+ * Build(deps): bump terser from 5.10.0 to 5.14.2 in /web/vtadmin [#10761](https://github.com/vitessio/vitess/pull/10761)
+### Documentation
+#### CLI
+ * [vtctldclient] Update CLI docs for usages, flags, and aliases [#10502](https://github.com/vitessio/vitess/pull/10502)
+ * [vtctldclient] Add autodoc tool for generating website docs [#10635](https://github.com/vitessio/vitess/pull/10635)
+ * [release-15.0] [vtadmin] Do not backtick binary name (#11464) [#11481](https://github.com/vitessio/vitess/pull/11481)
+#### Cluster management
+ * [main] Add the vtorc discovery bug as a known issue to 14.0 (#10711) [#10724](https://github.com/vitessio/vitess/pull/10724)
+#### Documentation
+ * Throttler stats: amendment [#10572](https://github.com/vitessio/vitess/pull/10572)
+ * Improvements to the Summary doc [#11502](https://github.com/vitessio/vitess/pull/11502)
+#### General
+ * release notes: add index to v15 summary [#10829](https://github.com/vitessio/vitess/pull/10829)
+#### Query Serving
+ * added vindex interface breaking change to summary notes [#10693](https://github.com/vitessio/vitess/pull/10693)
+#### VTAdmin
+ * [vtadmin] Document known issue with node versions 17+ [#10483](https://github.com/vitessio/vitess/pull/10483)
+ * [vtadmin] Add authzdocsgen to generate some website docs [#10513](https://github.com/vitessio/vitess/pull/10513)
+### Enhancement
+#### Backup and Restore
+ * expose vtbackup stats at --port /metrics [#11388](https://github.com/vitessio/vitess/pull/11388)
+#### Build/CI
+ * Add name to static check workflow [#10470](https://github.com/vitessio/vitess/pull/10470)
+ * Make etcd based tests more deterministic and surface errors [#10521](https://github.com/vitessio/vitess/pull/10521)
+ * Skip CI workflows on `push` for pull requests [#10768](https://github.com/vitessio/vitess/pull/10768)
+ * Run upgrade/downgrade tests on main [#11022](https://github.com/vitessio/vitess/pull/11022)
+ * Move CI workflow to use latest community version of mysql 8.0 [#11493](https://github.com/vitessio/vitess/pull/11493)
+#### CLI
+ * [cmd/*] Switch to pflag for all CLI flag parsing [#10619](https://github.com/vitessio/vitess/pull/10619)
+ * [go/mysql/*] Move all authserver–related flags off of global flagset [#10752](https://github.com/vitessio/vitess/pull/10752)
+ * [cli] [mysqlctl] Scope all backupstorage implementation flags to `pflag` and relevant binaries [#10844](https://github.com/vitessio/vitess/pull/10844)
+ * [cli] [mysqlctl] Scope `backup_storage_implementation` flag to `pflag` [#10852](https://github.com/vitessio/vitess/pull/10852)
+ * Port vtorc CLI to servenv and pflag [#10911](https://github.com/vitessio/vitess/pull/10911)
+ * [vtexplain] Switch vtexplain flags to use pflag hooks [#10938](https://github.com/vitessio/vitess/pull/10938)
+ * [cli] [vtgate] Migrate `vtgate/buffer` flags to `pflag` [#10939](https://github.com/vitessio/vitess/pull/10939)
+ * [cli] [grpcvtgateconn] Migrate `vtgate/grpcvtgateconn` flags to `pflag` [#10941](https://github.com/vitessio/vitess/pull/10941)
+ * [cli] [vtgate] Migrate `grpcvtgateservice` flags to `pflag` and scope to appropriate binaries. [#10947](https://github.com/vitessio/vitess/pull/10947)
+ * [cli] [vtgr] Migrate all `vtgr` flags to `pflag` [#10952](https://github.com/vitessio/vitess/pull/10952)
+ * [cli] Migrate `vterrors` to `pflag` [#10957](https://github.com/vitessio/vitess/pull/10957)
+ * [cli] [go/mysql/collations/...] Migrate all flags to `pflag` [#10970](https://github.com/vitessio/vitess/pull/10970)
+ * [cli] [tabletconn] Migrate `go/vt/vttablet/tabletconn` to `pflag` [#10999](https://github.com/vitessio/vitess/pull/10999)
+ * [trace] Migrate `go/trace` to use `pflag` for flag definitions [#11028](https://github.com/vitessio/vitess/pull/11028)
+ * [log] Migrate `go/vt/log` flags to `pflag` [#11036](https://github.com/vitessio/vitess/pull/11036)
+ * [cli] [logutil] Migrate flags defined in `go/vt/logutil` to `pflag` [#11044](https://github.com/vitessio/vitess/pull/11044)
+ * [cli] [tabletmanager] Migrate all tabletmanager flags to `pflag` [#11057](https://github.com/vitessio/vitess/pull/11057)
+ * [tmclient] Migrate flags to pflag [#11066](https://github.com/vitessio/vitess/pull/11066)
+ * [cli] [tabletserver/vstreamer] Migrate vstreamer's packet size flags to pflags [#11087](https://github.com/vitessio/vitess/pull/11087)
+ * [cli] [tabletserver/vreplication] Migrate vreplication flags to pflags [#11095](https://github.com/vitessio/vitess/pull/11095)
+ * [cli] [tabletserver/throttler] Migrate throttler flags to pflags [#11100](https://github.com/vitessio/vitess/pull/11100)
+ * [cli] [tabletserver/gc] Migrate gc flags to pflags [#11101](https://github.com/vitessio/vitess/pull/11101)
+ * [cli] [stats] Migrate stats/opentsdb + stats/statsd flags to pflag [#11105](https://github.com/vitessio/vitess/pull/11105)
+ * [cli] [topo/consultopo] Migrate consul flags to pflags [#11106](https://github.com/vitessio/vitess/pull/11106)
+ * [cli] [status] Migrate go/vt/status to pflag [#11107](https://github.com/vitessio/vitess/pull/11107)
+ * [cli] [tabletserver/tabletenv] Migrate tabletenv flags to pflags [#11109](https://github.com/vitessio/vitess/pull/11109)
+ * [cli] [grpc{tabletconn,tmclient}] Migrate flags to `pflag` [#11111](https://github.com/vitessio/vitess/pull/11111)
+ * [cli] [grpcclient] Migrate flags to `pflag` [#11115](https://github.com/vitessio/vitess/pull/11115)
+ * [cli] [grpccommon] Migrate flags to `pflag` [#11122](https://github.com/vitessio/vitess/pull/11122)
+ * [cli] [tabletserver/streamlog] Migrate streamlog flags to pflags [#11125](https://github.com/vitessio/vitess/pull/11125)
+ * [cli] Migrate withddl/workflow/healthstreamer flags to pflags [#11126](https://github.com/vitessio/vitess/pull/11126)
+ * [cli] [servenv] Migrate grpc auth server flags within `servenv` to `pflag` [#11146](https://github.com/vitessio/vitess/pull/11146)
+ * [cli] [servenv] Migrate flags used by grpc servers to `pflag` [#11165](https://github.com/vitessio/vitess/pull/11165)
+ * [cli] [servenv] Migrate missed auth flag to pflag [#11166](https://github.com/vitessio/vitess/pull/11166)
+ * [cli] [servenv] migrate `--service_map` and `pprof` flags to `pflag` [#11179](https://github.com/vitessio/vitess/pull/11179)
+ * [cli] [servenv] Migrate miscellaneous flags to `pflag` [#11186](https://github.com/vitessio/vitess/pull/11186)
+ * [cli] [servenv] Migrate `--version` flag to pflag, and also add to `vtctldclient` and `vtadmin` [#11189](https://github.com/vitessio/vitess/pull/11189)
+ * [cli] [servenv] Migrate `--mysql_server_version` to pflag [#11190](https://github.com/vitessio/vitess/pull/11190)
+ * [cli] Migrate flag to pflag for file/sys logger [#11274](https://github.com/vitessio/vitess/pull/11274)
+ * [cli] Misc pflag binary migrations [#11307](https://github.com/vitessio/vitess/pull/11307)
+ * [cli] [mysqlctl] Migrate mysqlctl flags to pflags [#11314](https://github.com/vitessio/vitess/pull/11314)
+ * [cli] [vtgate/vschemaacl] Migrate VschemaACL flags to pflags [#11315](https://github.com/vitessio/vitess/pull/11315)
+ * [cli] [vtctl] Migrate all vtctl commands to `pflag` [#11320](https://github.com/vitessio/vitess/pull/11320)
+ * Fix adding flags to vtctlclient and vtctldclient [#11322](https://github.com/vitessio/vitess/pull/11322)
+ * [cli] [vtctld] Migrate vtctld flags to pflags [#11326](https://github.com/vitessio/vitess/pull/11326)
+ * [cli] [topo] Migrate topo2topo flags to pflags [#11327](https://github.com/vitessio/vitess/pull/11327)
+ * [cli] [zkctld] Migrate all zkctld flags to pflag [#11329](https://github.com/vitessio/vitess/pull/11329)
+ * [cli] [zkctl] Migrate zkctl flags to pflags [#11331](https://github.com/vitessio/vitess/pull/11331)
+ * [cli] [zk] Migrate zk flags to pflags [#11332](https://github.com/vitessio/vitess/pull/11332)
+ * [cli] [vtbackup] Migrate all vtbackup flags to pflag [#11334](https://github.com/vitessio/vitess/pull/11334)
+ * Move dbconfigs to pflag and remove deprecated flags [#11336](https://github.com/vitessio/vitess/pull/11336)
+ * [cmd/vtctl] Migrate flags to `pflag` [#11339](https://github.com/vitessio/vitess/pull/11339)
+ * [vtctlclient] Migrate to pflag [#11342](https://github.com/vitessio/vitess/pull/11342)
+ * [cli] Migrate cmd/vtclient and cmd/vttablet from flag to pflag [#11349](https://github.com/vitessio/vitess/pull/11349)
+ * [cli] Migrate cmd/vtctld to pflag [#11350](https://github.com/vitessio/vitess/pull/11350)
+ * [asthelpergen] Migrate to pflags [#11363](https://github.com/vitessio/vitess/pull/11363)
+ * [vtexplain] Migrate to pflags [#11364](https://github.com/vitessio/vitess/pull/11364)
+ * Migrates `cmd/vtbench` to pflags [#11366](https://github.com/vitessio/vitess/pull/11366)
+ * [grpcclient] Migrate `--grpc_auth_static_client_creds` to pflag [#11367](https://github.com/vitessio/vitess/pull/11367)
+ * [vtctlclient] Migrate `vtctl_client_protocol` to pflag [#11368](https://github.com/vitessio/vitess/pull/11368)
+ * [flagutil] Cleanup `flag` references [#11381](https://github.com/vitessio/vitess/pull/11381)
+ * Migrate mysqlctl command and package to pflag [#11391](https://github.com/vitessio/vitess/pull/11391)
+ * Migrate ACL package to pflag [#11392](https://github.com/vitessio/vitess/pull/11392)
+ * [cli] [topo] Migrate topo flags to pflags [#11393](https://github.com/vitessio/vitess/pull/11393)
+ * [cli] [etcd2] Migrate etcd2topo flags to pflags [#11394](https://github.com/vitessio/vitess/pull/11394)
+ * [tools/rowlog] Migrate to pflag [#11412](https://github.com/vitessio/vitess/pull/11412)
+ * VTop: Adds a function to get the flag set for a given command [#11424](https://github.com/vitessio/vitess/pull/11424)
+ * Properly deprecate flags and fix default for `--cell` [#11501](https://github.com/vitessio/vitess/pull/11501)
+ * Allow version to be accessible via the -v shorthand [#11512](https://github.com/vitessio/vitess/pull/11512)
+#### Cluster management
+ * Throttler: stats in /debug/vars [#10443](https://github.com/vitessio/vitess/pull/10443)
+ * Adds RPCs to vttablet that vtorc requires [#10464](https://github.com/vitessio/vitess/pull/10464)
+ * vtctl GetSchema --table_schema_only [#10552](https://github.com/vitessio/vitess/pull/10552)
+ * Deprecate enable-semi-sync in favour of RPC parameter [#10695](https://github.com/vitessio/vitess/pull/10695)
+ * Add GetFullStatus RPC to vtctld [#10905](https://github.com/vitessio/vitess/pull/10905)
+ * Simply Replication Status proto conversions [#10926](https://github.com/vitessio/vitess/pull/10926)
+ * Improve PRS to validate new primary can make forward progress [#11308](https://github.com/vitessio/vitess/pull/11308)
+ * [cli] Topo: Migrate zk2topo and k8stopo to pflag [#11401](https://github.com/vitessio/vitess/pull/11401)
+ * remove excessive logging [#11479](https://github.com/vitessio/vitess/pull/11479)
+#### Examples
+ * Give all permissions in rbac in examples [#11463](https://github.com/vitessio/vitess/pull/11463)
+ * Fix Vitess Operator example [#11546](https://github.com/vitessio/vitess/pull/11546)
+#### General
+ * [cli] Migrate miscellaneous components from flag to pflag [#11347](https://github.com/vitessio/vitess/pull/11347)
+ * Move vttlstest to pflag and cobra [#11361](https://github.com/vitessio/vitess/pull/11361)
+ * Move vtaclcheck command to pflags [#11372](https://github.com/vitessio/vitess/pull/11372)
+ * Migrate mysqlctld from flag to pflag [#11376](https://github.com/vitessio/vitess/pull/11376)
+ * removing unncessary flags across binaries [#11495](https://github.com/vitessio/vitess/pull/11495)
+ * [release-15.0] Upgrade to `go1.18.7` [#11507](https://github.com/vitessio/vitess/pull/11507)
+ * Removing redundant flags across binaries [#11522](https://github.com/vitessio/vitess/pull/11522)
+#### Observability
+ * Add SessionUUID and transaction mark to vtgate query logs [#10427](https://github.com/vitessio/vitess/pull/10427)
+#### Online DDL
+ * [cli] [tabletserver/onlineddl] Migrate onlineddl flags to pflags [#11099](https://github.com/vitessio/vitess/pull/11099)
+#### Query Serving
+ * Refactor aggregation AST structs [#10347](https://github.com/vitessio/vitess/pull/10347)
+ * Concurrent vitess migrations [#10410](https://github.com/vitessio/vitess/pull/10410)
+ * Make vtgate streamlog buffer configurable [#10426](https://github.com/vitessio/vitess/pull/10426)
+ * fix: change planner_version to planner-version everywhere [#10453](https://github.com/vitessio/vitess/pull/10453)
+ * enable schema tracking by default [#10455](https://github.com/vitessio/vitess/pull/10455)
+ * Add support for alter table rename column [#10469](https://github.com/vitessio/vitess/pull/10469)
+ * schemadiff: `ColumnRenameStrategy` in DiffHints [#10472](https://github.com/vitessio/vitess/pull/10472)
+ * Add parsing support for performance schema functions [#10478](https://github.com/vitessio/vitess/pull/10478)
+ * schemadiff: TableRenameStrategy in DiffHints [#10479](https://github.com/vitessio/vitess/pull/10479)
+ * OnlineDDL executor: adding log entries [#10482](https://github.com/vitessio/vitess/pull/10482)
+ * Fix: handle all cases for consistent lookup unique on single transaction mode [#10493](https://github.com/vitessio/vitess/pull/10493)
+ * Cleanup: Remove 'Name' field from aggregate structure [#10507](https://github.com/vitessio/vitess/pull/10507)
+ * New explain format: VTEXPLAIN [#10556](https://github.com/vitessio/vitess/pull/10556)
+ * Insert with select using streaming call [#10577](https://github.com/vitessio/vitess/pull/10577)
+ * Add parsing support for GTID functions [#10579](https://github.com/vitessio/vitess/pull/10579)
+ * [14.0] Schema tracking acl error logging [#10591](https://github.com/vitessio/vitess/pull/10591)
+ * Update how table uses are reported [#10598](https://github.com/vitessio/vitess/pull/10598)
+ * Parse INTERVAL() function [#10599](https://github.com/vitessio/vitess/pull/10599)
+ * VReplication: throttling info for both source and target; Online DDL propagates said info [#10601](https://github.com/vitessio/vitess/pull/10601)
+ * Online DDL: increase stale migration timeout [#10614](https://github.com/vitessio/vitess/pull/10614)
+ * Online DDL: even more logging [#10615](https://github.com/vitessio/vitess/pull/10615)
+ * Parse LOCATE(), POSITION() and CHAR() functions [#10629](https://github.com/vitessio/vitess/pull/10629)
+ * Improve handling of MATCH AGAINST [#10633](https://github.com/vitessio/vitess/pull/10633)
+ * Accept geomcollection as alias for geometrycollection [#10641](https://github.com/vitessio/vitess/pull/10641)
+ * Fix stats for cache miss and add CachePlan for Vtgate [#10643](https://github.com/vitessio/vitess/pull/10643)
+ * Support lookup multi shard autocommit [#10652](https://github.com/vitessio/vitess/pull/10652)
+ * Online DDL: ALTER VITESS_MIGRATION COMPLETE ALL [#10694](https://github.com/vitessio/vitess/pull/10694)
+ * Improve performance of `information_schema` queries on MySQL 8. [#10703](https://github.com/vitessio/vitess/pull/10703)
+ * ApplySchema: renew keyspace lock while iterating SQLs [#10727](https://github.com/vitessio/vitess/pull/10727)
+ * ApplySchema: do not ReloadSchema on ExecuteFetchAsDba [#10739](https://github.com/vitessio/vitess/pull/10739)
+ * Online DDL: issue a ReloadSchema at the completion of any migration [#10766](https://github.com/vitessio/vitess/pull/10766)
+ * refactor: make resource pool as interface and pool refresh as common [#10784](https://github.com/vitessio/vitess/pull/10784)
+ * Online DDL: migration state transitions to 'cancelled' after CANCEL command [#10900](https://github.com/vitessio/vitess/pull/10900)
+ * add vttablet cli flags for stream consolidator [#10907](https://github.com/vitessio/vitess/pull/10907)
+ * Online DDL: --postpone-launch, ALTER VITESS_MIGRATION ... LAUNCH [#10915](https://github.com/vitessio/vitess/pull/10915)
+ * Implement date, time and timestamp literals [#10921](https://github.com/vitessio/vitess/pull/10921)
+ * add the selected keyspace to LogStats [#10924](https://github.com/vitessio/vitess/pull/10924)
+ * schemadiff: rich error for unmet view dependencies [#10940](https://github.com/vitessio/vitess/pull/10940)
+ * Improve route merging for queries that have conditions on different vindexes, but can be merged via join predicates. [#10942](https://github.com/vitessio/vitess/pull/10942)
+ * decouple olap tx timeout from oltp tx timeout [#10946](https://github.com/vitessio/vitess/pull/10946)
+ * Merge subqueries that "join" on lookup index columns. [#10966](https://github.com/vitessio/vitess/pull/10966)
+ * Remove prefill logic from resource pool [#11002](https://github.com/vitessio/vitess/pull/11002)
+ * schemadiff: FullTextKeyStrategy, handling multiple 'ADD FULLTEXT key' alter options [#11012](https://github.com/vitessio/vitess/pull/11012)
+ * Online DDL: support multiple 'ADD FULLTEXT KEY' in single ALTER [#11013](https://github.com/vitessio/vitess/pull/11013)
+ * refactor: group all system setting query into single set statement [#11021](https://github.com/vitessio/vitess/pull/11021)
+ * System Settings connections pool implementation [#11037](https://github.com/vitessio/vitess/pull/11037)
+ * Improve schema reload performance by pre-filtering joined rows. [#11043](https://github.com/vitessio/vitess/pull/11043)
+ * Improve merging for `None` route opcodes. [#11045](https://github.com/vitessio/vitess/pull/11045)
+ * Add possibility of viewing plans with graphviz [#11050](https://github.com/vitessio/vitess/pull/11050)
+ * Use available method to compare tables [#11056](https://github.com/vitessio/vitess/pull/11056)
+ * schemadiff: Fix handling of primary key [#11059](https://github.com/vitessio/vitess/pull/11059)
+ * No reserved connection on modifying system settings [#11088](https://github.com/vitessio/vitess/pull/11088)
+ * tabletserver stream replace schema name bindvar [#11090](https://github.com/vitessio/vitess/pull/11090)
+ * Online DDL: introduce '--max_concurrent_online_ddl' [#11091](https://github.com/vitessio/vitess/pull/11091)
+ * return resource back to pool on apply settings failure [#11096](https://github.com/vitessio/vitess/pull/11096)
+ * [Gen4] Merge `SeenPredicates` when creating route operator for join [#11104](https://github.com/vitessio/vitess/pull/11104)
+ * Two changes to the error sanitizer [#11114](https://github.com/vitessio/vitess/pull/11114)
+ * Online DDL: more error logging [#11117](https://github.com/vitessio/vitess/pull/11117)
+ * Add parsing for Offsets similar to bind-variables [#11120](https://github.com/vitessio/vitess/pull/11120)
+ * Fix typing error in constant for wait_until_sql_thread_after_gtids [#11121](https://github.com/vitessio/vitess/pull/11121)
+ * Treat `IN` operations on single value tuples as `Equal` operations. [#11123](https://github.com/vitessio/vitess/pull/11123)
+ * adding setting pool metrics [#11175](https://github.com/vitessio/vitess/pull/11175)
+ * Adds delete planning to Gen4 [#11177](https://github.com/vitessio/vitess/pull/11177)
+ * generate settings plan in tablet with query and reset setting query [#11181](https://github.com/vitessio/vitess/pull/11181)
+ * Add Metric For Time Elapsed In Getting Connection In Pools [#11213](https://github.com/vitessio/vitess/pull/11213)
+ * Online DDL: more info in a conflicting migration message [#11217](https://github.com/vitessio/vitess/pull/11217)
+ * addressing review comments from #11088 [#11221](https://github.com/vitessio/vitess/pull/11221)
+ * Reapply system settings on connection reconnect [#11256](https://github.com/vitessio/vitess/pull/11256)
+ * Allow non-SSL callers of VTGate RPC APIs to specify group information for the CallerID [#11260](https://github.com/vitessio/vitess/pull/11260)
+ * Move go/mysql flags to pflags [#11272](https://github.com/vitessio/vitess/pull/11272)
+ * feat: rewrite column names in HAVING [#11306](https://github.com/vitessio/vitess/pull/11306)
+ * advisory lock to acquire reserve connection only for get_lock [#11359](https://github.com/vitessio/vitess/pull/11359)
+ * fix: store the output of the rewrite [#11362](https://github.com/vitessio/vitess/pull/11362)
+ * gen4 planner: small cleanup [#11403](https://github.com/vitessio/vitess/pull/11403)
+#### TabletManager
+ * Improve topo handling and add additional functionality [#10906](https://github.com/vitessio/vitess/pull/10906)
+ * Replication Manager Improvements [#11194](https://github.com/vitessio/vitess/pull/11194)
+#### VReplication
+ * Partial Movetables: allow moving a keyspace one shard at a time [#9987](https://github.com/vitessio/vitess/pull/9987)
+ * Fail VReplication workflows on errors that persist and unrecoverable errors [#10429](https://github.com/vitessio/vitess/pull/10429)
+ * VDiff2: Support Resuming VDiffs [#10497](https://github.com/vitessio/vitess/pull/10497)
+ * Implement VDiff2 Delete Action [#10608](https://github.com/vitessio/vitess/pull/10608)
+ * VDiff2: Auto retry to continue on error [#10639](https://github.com/vitessio/vitess/pull/10639)
+ * VDiff2: Add --wait flag to Create/Resume actions [#10799](https://github.com/vitessio/vitess/pull/10799)
+ * VDiff2: Add Stop Action [#10830](https://github.com/vitessio/vitess/pull/10830)
+ * Add tracking session state changes for transaction start [#11061](https://github.com/vitessio/vitess/pull/11061)
+ * Port time zone handling from vdiff1 to vdiff2 [#11128](https://github.com/vitessio/vitess/pull/11128)
+ * VDiff2: Add support for Mount+Migrate [#11204](https://github.com/vitessio/vitess/pull/11204)
+ * VStreams: Rotate Binary Log For Snapshot Connections [#11344](https://github.com/vitessio/vitess/pull/11344)
+ * For partial MoveTables, setup reverse shard routing rules on workflow creation [#11415](https://github.com/vitessio/vitess/pull/11415)
+#### VTAdmin
+ * using nginx for vtadmin web [#10770](https://github.com/vitessio/vitess/pull/10770)
+ * [VTAdmin] `Validate`, `ValidateShard`, `ValidateVersionShard`, `GetFullStatus` [#11438](https://github.com/vitessio/vitess/pull/11438)
+ * Full Status tab improvements for VTAdmin [#11470](https://github.com/vitessio/vitess/pull/11470)
+ * [15.0] Add VTGate debug/status page link to VTAdmin [#11541](https://github.com/vitessio/vitess/pull/11541)
+#### VTorc
+ * Replicas should be able to heal if replication is not initialised properly [#10943](https://github.com/vitessio/vitess/pull/10943)
+ * Getting rid of external logging [#11085](https://github.com/vitessio/vitess/pull/11085)
+ * Moving math package from external library [#11147](https://github.com/vitessio/vitess/pull/11147)
+ * Prevent martini from logging in VTOrc [#11173](https://github.com/vitessio/vitess/pull/11173)
+ * Only refresh required tablet's information in VTOrc [#11220](https://github.com/vitessio/vitess/pull/11220)
+ * Parameterize VTOrc constants [#11254](https://github.com/vitessio/vitess/pull/11254)
+ * Introduce `servenv` status pages in VTOrc [#11263](https://github.com/vitessio/vitess/pull/11263)
+ * Addition of Metrics to VTOrc to track the number of recoveries ran and their success count. [#11338](https://github.com/vitessio/vitess/pull/11338)
+ * VTOrc cleanup: Remove unused CLI code and move relevant APIs to the new VTOrc UI [#11370](https://github.com/vitessio/vitess/pull/11370)
+#### vtctl
+ * Add order, limit, skip options to onlineddl show command [#10651](https://github.com/vitessio/vitess/pull/10651)
+#### vtexplain
+ * `vtexplain` fails for vindex lookup queries with duplicate / equivalent values. [#10996](https://github.com/vitessio/vitess/pull/10996)
+### Feature Request
+#### Backup and Restore
+ * Backup/Restore: add support for external compressors and decompressors [#10558](https://github.com/vitessio/vitess/pull/10558)
+#### Evalengine
+ * evalengine: Support built-in MySQL function CEIL() [#11027](https://github.com/vitessio/vitess/pull/11027)
+#### VTAdmin
+ * add vtadmin docker image [#10543](https://github.com/vitessio/vitess/pull/10543)
+#### web UI
+ * [VTAdmin] RebuildKeyspaceGraph, RemoveKeyspaceCell, NewShard [#11249](https://github.com/vitessio/vitess/pull/11249)
+ * VTAdmin: shard actions [#11328](https://github.com/vitessio/vitess/pull/11328)
+ * [VTAdmin] Cherry Pick Topology Browser [#11518](https://github.com/vitessio/vitess/pull/11518)
+### Internal Cleanup
+#### Build/CI
+ * upgrade versions of security vulnerable packages crypto/net/serf [#10272](https://github.com/vitessio/vitess/pull/10272)
+ * update golangci-lint to 1.46.2 [#10568](https://github.com/vitessio/vitess/pull/10568)
+ * Update to latest Protobuf 21.3 release [#10803](https://github.com/vitessio/vitess/pull/10803)
+ * Always close body for HTTP requests in tests [#10835](https://github.com/vitessio/vitess/pull/10835)
+ * Always setup an underlying topo for a sandbox [#10882](https://github.com/vitessio/vitess/pull/10882)
+ * Cleanup the go-sqlite3 workaround [#10884](https://github.com/vitessio/vitess/pull/10884)
+ * Cleanup usage of go.rice in favor of go:embed [#10956](https://github.com/vitessio/vitess/pull/10956)
+#### CLI
+ * [cli][discovery]: migrate discovery flags to pflag [#10863](https://github.com/vitessio/vitess/pull/10863)
+ * [vtcombo] Delete `flag.Set` call on non-existent flag [#10889](https://github.com/vitessio/vitess/pull/10889)
+ * Move goyacc to use pflags package [#11092](https://github.com/vitessio/vitess/pull/11092)
+ * Move sqlparser flags to use pflags [#11094](https://github.com/vitessio/vitess/pull/11094)
+ * vtgate pflags migration [#11318](https://github.com/vitessio/vitess/pull/11318)
+ * [cli] `vttestserver` flag parsing to use pflags [#11321](https://github.com/vitessio/vitess/pull/11321)
+ * customrule pflags migration [#11340](https://github.com/vitessio/vitess/pull/11340)
+ * srvtopo pflags migration [#11341](https://github.com/vitessio/vitess/pull/11341)
+ * [cli] Begrudgingly shim `flag.Parse` call to trick glog [#11382](https://github.com/vitessio/vitess/pull/11382)
+ * [cli] Use pflag/flag interop function in vtctldclient legacy shim [#11399](https://github.com/vitessio/vitess/pull/11399)
+ * Fix vtbackup binary by adding the flags it needs that we missed before [#11417](https://github.com/vitessio/vitess/pull/11417)
+#### Cluster management
+ * Remove legacy healthcheck files and structures [#10542](https://github.com/vitessio/vitess/pull/10542)
+ * Proto file lint fix and vtadmin generated file [#10563](https://github.com/vitessio/vitess/pull/10563)
+ * Cleanup: un-explode GetSchema and reuse GetSchemaRequest struct [#10578](https://github.com/vitessio/vitess/pull/10578)
+ * [vtctl] Delete query commands [#10646](https://github.com/vitessio/vitess/pull/10646)
+ * Cleanup: ERS and PRS tests by removing setupShardLegacy [#10728](https://github.com/vitessio/vitess/pull/10728)
+ * Refactor: Unexplode Backup() function, pass BackupRequest as argument [#10904](https://github.com/vitessio/vitess/pull/10904)
+ * Deprecate orchestrator integration [#11409](https://github.com/vitessio/vitess/pull/11409)
+ * Adding deprecate message to backup hooks [#11491](https://github.com/vitessio/vitess/pull/11491)
+ * [15.0] Deprecate InitShardPrimary command [#11557](https://github.com/vitessio/vitess/pull/11557)
+#### Evalengine
+ * evalengine: expose Filter operations [#10903](https://github.com/vitessio/vitess/pull/10903)
+ * Move evalengine integration tests to use pflags [#11378](https://github.com/vitessio/vitess/pull/11378)
+#### General
+ * Remove v2 resharding fields [#10409](https://github.com/vitessio/vitess/pull/10409)
+ * Remove @doeg from a subset of CODEOWNERS [#10557](https://github.com/vitessio/vitess/pull/10557)
+ * Remove @doeg from maintainers [#10625](https://github.com/vitessio/vitess/pull/10625)
+ * Remove the release notes document from the main branch [#10672](https://github.com/vitessio/vitess/pull/10672)
+ * Delete `go/vt/vttime` [#10995](https://github.com/vitessio/vitess/pull/10995)
+#### Observability
+ * flags etc: delete old flags and stats, add deprecation notice to release notes [#11402](https://github.com/vitessio/vitess/pull/11402)
+#### Query Serving
+ * Extract vindex lookup queries into their own primitive [#10490](https://github.com/vitessio/vitess/pull/10490)
+ * Reduce shift-reduce conflicts [#10500](https://github.com/vitessio/vitess/pull/10500)
+ * feat: don't stop if compilation errors are happening on the generated files [#10506](https://github.com/vitessio/vitess/pull/10506)
+ * User defined and sys variables [#10547](https://github.com/vitessio/vitess/pull/10547)
+ * refactor: removed context from part of vcursor struct [#10632](https://github.com/vitessio/vitess/pull/10632)
+ * Unexplode return values for queryservice [#10802](https://github.com/vitessio/vitess/pull/10802)
+ * Mark aggregate functions callable [#10805](https://github.com/vitessio/vitess/pull/10805)
+ * Separate function for creating bind variables [#10883](https://github.com/vitessio/vitess/pull/10883)
+ * check for nil earlier [#10887](https://github.com/vitessio/vitess/pull/10887)
+ * refactor: minor refactor in partial shard routing and change in flag to dashes [#11357](https://github.com/vitessio/vitess/pull/11357)
+ * Delete deprecated flags [#11360](https://github.com/vitessio/vitess/pull/11360)
+ * Remove deprecated IsSkipTopo() function [#11377](https://github.com/vitessio/vitess/pull/11377)
+#### TabletManager
+ * refactor: unexplode VStreamRows() and reuse VStreamRowsRequest, unexplode VStream() and reuse VStreamRequest [#10671](https://github.com/vitessio/vitess/pull/10671)
+ * [tmclient] [tmserver] Unexplode fetchers [#10998](https://github.com/vitessio/vitess/pull/10998)
+#### VReplication
+ * Delete all legacy sharding related code [#10278](https://github.com/vitessio/vitess/pull/10278)
+#### VTAdmin
+ * [vtadmin] Rename ERS/PRS pools+flags properly [#10460](https://github.com/vitessio/vitess/pull/10460)
+#### VTorc
+ * Use introduced tablet manager RPCs in VTOrc [#10467](https://github.com/vitessio/vitess/pull/10467)
+ * Remove logging in GetDurabilityPolicy [#10516](https://github.com/vitessio/vitess/pull/10516)
+ * VTOrc Cleanup: Remove KV stores [#10645](https://github.com/vitessio/vitess/pull/10645)
+ * Use TMC RPCs in VTOrc [#10664](https://github.com/vitessio/vitess/pull/10664)
+ * Nil-check errors before printing them in VTOrc [#11156](https://github.com/vitessio/vitess/pull/11156)
+ * Cluster-Alias cleanup for VTOrc [#11193](https://github.com/vitessio/vitess/pull/11193)
+ * Refactor: Rename Orchestrator to VTOrc in the codebase [#11231](https://github.com/vitessio/vitess/pull/11231)
+ * VTOrc Cleanup - Configs, APIs and old UI [#11356](https://github.com/vitessio/vitess/pull/11356)
+ * VTOrc Standardisation and Cleanup [#11416](https://github.com/vitessio/vitess/pull/11416)
+ * [vtorc] Remove duplicated vt/log import [#11423](https://github.com/vitessio/vitess/pull/11423)
+#### vtctl
+ * [vtctl] delete all throttler commands and associated cmd imports [#10661](https://github.com/vitessio/vitess/pull/10661)
+#### web UI
+ * Remove sharding_column_name and sharding_column_type from vtctld2 [#10459](https://github.com/vitessio/vitess/pull/10459)
+### Other
+#### Other
+ * Build(deps): Bump mysql-connector-java from 8.0.25 to 8.0.28 in /java/example [#10551](https://github.com/vitessio/vitess/pull/10551)
+### Performance
+#### Query Serving
+ * schemadiff performance improvements [#11035](https://github.com/vitessio/vitess/pull/11035)
+ * schemadiff: Shallow copy of the schema [#11041](https://github.com/vitessio/vitess/pull/11041)
+ * [vtgate] Add flag to pool connection read buffers [#11167](https://github.com/vitessio/vitess/pull/11167)
+#### TabletManager
+ * Tablet Executor: consolidate ReloadSchema calls, and skip for Online DDL [#10719](https://github.com/vitessio/vitess/pull/10719)
+### Regression
+#### Backup and Restore
+ * revert default compression engine [#11029](https://github.com/vitessio/vitess/pull/11029)
+### Release
+#### Build/CI
+ * Rework how the `release notes` labels are handled by the CI [#10508](https://github.com/vitessio/vitess/pull/10508)
+ * Rework the generation of the release notes [#10510](https://github.com/vitessio/vitess/pull/10510)
+ * Addition of the v14 release notes documents [#10602](https://github.com/vitessio/vitess/pull/10602)
+#### CLI
+ * Migrates `release-notes` to pflag [#11365](https://github.com/vitessio/vitess/pull/11365)
+#### Deployments
+ * Code freeze of release-15.0 [#11565](https://github.com/vitessio/vitess/pull/11565)
+#### Documentation
+ * Update the release documentation [#11174](https://github.com/vitessio/vitess/pull/11174)
+ * Add hyperlink in the release changelog [#11241](https://github.com/vitessio/vitess/pull/11241)
+#### General
+ * Post release `v14.0.0-RC1` steps [#10458](https://github.com/vitessio/vitess/pull/10458)
+ * Documented the legacy healthcheck and tabletgateway and added summary to 14's summary [#10567](https://github.com/vitessio/vitess/pull/10567)
+ * Addition of the v14 release docs on main [#10606](https://github.com/vitessio/vitess/pull/10606)
+ * [main] Addition of the release notes summary for v14.0.1 (#10821) [#10837](https://github.com/vitessio/vitess/pull/10837)
+ * [main] Release summary 13.0.2 (#10820) [#10838](https://github.com/vitessio/vitess/pull/10838)
+ * Addition of the release notes for `v13.0.2` [#10849](https://github.com/vitessio/vitess/pull/10849)
+ * Addition of the release notes for v14.0.1 [#10851](https://github.com/vitessio/vitess/pull/10851)
+ * Addition of the release notes for v12.0.5 [#10873](https://github.com/vitessio/vitess/pull/10873)
+ * Include the compose examples in the `do_release` script [#11130](https://github.com/vitessio/vitess/pull/11130)
+ * do_release: fix updateVitessExamples function call [#11134](https://github.com/vitessio/vitess/pull/11134)
+ * Upgrade go version to `1.18.5` on `main` [#11136](https://github.com/vitessio/vitess/pull/11136)
+ * Addition of the release notes for `v14.0.2` [#11160](https://github.com/vitessio/vitess/pull/11160)
+ * Addition of the release notes for `v13.0.3` [#11162](https://github.com/vitessio/vitess/pull/11162)
+ * Addition of the release notes for `v12.0.6` [#11164](https://github.com/vitessio/vitess/pull/11164)
+ * Simple code freeze script and workflow [#11178](https://github.com/vitessio/vitess/pull/11178)
+ * Improve the `do_release` script to have two different Pull Requests instead of one during a release [#11197](https://github.com/vitessio/vitess/pull/11197)
+ * Release notes 14.0.3 on main [#11406](https://github.com/vitessio/vitess/pull/11406)
+ * Code freeze of release-15.0 [#11427](https://github.com/vitessio/vitess/pull/11427)
+ * Release of v15.0.0-rc1 [#11443](https://github.com/vitessio/vitess/pull/11443)
+ * Back to dev mode after v15.0.0-rc1 [#11444](https://github.com/vitessio/vitess/pull/11444)
+ * fixing urls [#11572](https://github.com/vitessio/vitess/pull/11572)
+### Testing
+#### Backup and Restore
+ * Enable VTOrc in backup tests [#11410](https://github.com/vitessio/vitess/pull/11410)
+#### Build/CI
+ * test: reduce number of vttablets to start in the tests [#10491](https://github.com/vitessio/vitess/pull/10491)
+ * test: for unit tests set TMPDIR=/tmp_XXXXXX on mac [#10655](https://github.com/vitessio/vitess/pull/10655)
+ * CI: mysql8 test for schemadiff_vrepl [#10679](https://github.com/vitessio/vitess/pull/10679)
+ * Fixes to config file and flakiness fix for TestFloatValueDefault [#10710](https://github.com/vitessio/vitess/pull/10710)
+ * Flakes: Expect SERVING status for tablets added to shard with a PRIMARY [#11007](https://github.com/vitessio/vitess/pull/11007)
+#### CLI
+ * [cli] [vttest] Extend vttest.TopoData to implement `pflag.Value`, and make function return types implicit [#10994](https://github.com/vitessio/vitess/pull/10994)
+ * [cli] [vtcombo|tests] Migrate `vtcombo` to `pflag` and rewrite tabletconn tests to not need TabletProtocol exported [#11010](https://github.com/vitessio/vitess/pull/11010)
+#### Cluster management
+ * Fix incorrect use of loop variable in parallel test [#11082](https://github.com/vitessio/vitess/pull/11082)
+#### General
+ * fix minor code unreachability error [#10771](https://github.com/vitessio/vitess/pull/10771)
+#### Query Serving
+ * unit test: fix mysql tests to run on MacOS [#10613](https://github.com/vitessio/vitess/pull/10613)
+ * Use many more valid test cases [#10640](https://github.com/vitessio/vitess/pull/10640)
+ * test: set parameter on vtgate than on vttablet [#10698](https://github.com/vitessio/vitess/pull/10698)
+ * Addition of a test in aggr_cases for grouping on data from derived table [#10868](https://github.com/vitessio/vitess/pull/10868)
+ * Format Gen4 end-to-end tests [#11089](https://github.com/vitessio/vitess/pull/11089)
+ * Fix `TestInvalidDateTimeTimestampVals` linter issues [#11098](https://github.com/vitessio/vitess/pull/11098)
+ * Use vtparams instead of clusterInstance in TestNormalizeAllFields [#11102](https://github.com/vitessio/vitess/pull/11102)
+ * test: deflake TestIdleTimeoutCreateFail [#11411](https://github.com/vitessio/vitess/pull/11411)
+ * Use JSON for plan tests [#11430](https://github.com/vitessio/vitess/pull/11430)
+#### VTAdmin
+ * [vtadmin] authz tests - tablet actions [#10457](https://github.com/vitessio/vitess/pull/10457)
+ * [vtadmin] Add authz tests for remaining non-schema related actions [#10481](https://github.com/vitessio/vitess/pull/10481)
+ * [vtadmin] Add schema-related authz tests [#10486](https://github.com/vitessio/vitess/pull/10486)
+ * [vtadmin/tests] Serialize Schema test cases to avoid cache backfill races [#10538](https://github.com/vitessio/vitess/pull/10538)
+ * [vtadmin] fix flaky GetSchemas test cases [#10555](https://github.com/vitessio/vitess/pull/10555)
+#### web UI
+ * Fixing flaky vtctld2 web test [#10541](https://github.com/vitessio/vitess/pull/10541)
+
diff --git a/changelog/15.0/15.0.0/release_notes.md b/changelog/15.0/15.0.0/release_notes.md
new file mode 100644
index 00000000000..f60fd0547f1
--- /dev/null
+++ b/changelog/15.0/15.0.0/release_notes.md
@@ -0,0 +1,463 @@
+# Release of Vitess v15.0.0
+## Summary
+
+- **[Known Issues](#known-issues)**
+- **[Breaking Changes](#breaking-changes)**
+ - [Flags](#flags)
+ - [VTTablet Flag Deletions](#vttablet-flag-deletions)
+ - [Vindex Interface](#vindex-interface)
+- **[Deprecations](#deprecations)**
+ - [LogStats Table and Keyspace Deprecated](#logstats-table-and-keyspace-deprecated)
+ - [Orchestrator Integration Deprecation](#orchestrator-integration-deprecation)
+ - [Connection Pool Prefill](#connection-pool-prefill)
+ - [InitShardPrimary Deprecation](#initshardprimary-deprecation)
+- **[Command-Line Syntax Deprecations](#command-line-syntax-deprecations)**
+ - [VTTablet Startup Flag Deletions](#vttablet-startup-flag-deletions)
+ - [VTTablet Startup Flag Deprecations](#vttablet-startup-flag-deprecations)
+ - [VTBackup Flag Deprecations](#vtbackup-flag-deprecations)
+- **[VTGate](#vtgate)**
+ - [vtgate --mysql-server-pool-conn-read-buffers](#vtgate--mysql-server-pool-conn-read-buffers)
+- **[VDiff2](#vdiff2)**
+ - [Resume Workflow](#resume-workflow)
+- **[New command line flags and behavior](#new-command-line-flags-and-behavior)**
+ - [vtctl GetSchema --table-schema-only](#vtctl-getschema--table-schema-only)
+  - [Support for Additional Compressors and Decompressors During Backup & Restore](#support-for-additional-compressors-and-decompressors-during-backup--restore)
+  - [Independent OLAP and OLTP Transactional Timeouts](#independent-olap-and-oltp-transactional-timeouts)
+ - [Support for Specifying Group Information in Calls to VTGate](#support-for-specifying-group-information-in-calls-to-vtgate)
+- **[Online DDL Changes](#online-ddl-changes)**
+ - [Concurrent Vitess Migrations](#concurrent-vitess-migrations)
+ - [VTCtl Command Changes](#vtctl-command-changes)
+ - [New Syntax](#new-syntax)
+- **[Tablet Throttler](#tablet-throttler)**
+ - [API Changes](#api-changes)
+- **[Mysql Compatibility](#mysql-compatibility)**
+ - [System Settings](#system-settings)
+ - [Lookup Vindexes](#lookup-vindexes)
+- **[Durability Policy](#durability-policy)**
+ - [Cross Cell](#cross-cell)
+- **[New EXPLAIN Format](#new-explain-format)**
+ - [FORMAT=vtexplain](#formatvtexplain)
+- **[VTOrc](#vtorc)**
+ - [Old UI Removal and Replacement](#old-ui-removal-and-replacement)
+  - [Configuration Refactor and New Flags](#configuration-refactor-and-new-flags)
+ - [Example Upgrade](#example-upgrade)
+ - [Default Configuration Files](#default-configuration-files)
+- **[Flags Restructure](#flags-restructure)**
+ - [Flags Diff](#flags-diff)
+
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
+
+## Major Changes
+
+### Breaking Changes
+
+#### Flags
+
+- The deprecated `--cpu_profile` flag has been removed. Please use the `--pprof` flag instead.
+- The deprecated `--mem-profile-rate` flag has been removed. Please use `--pprof=mem` instead.
+- The deprecated `--mutex-profile-fraction` flag has been removed. Please use `--pprof=mutex` instead.
+- The deprecated vtgate/vtexplain/vtcombo flag `--planner_version` has been removed. Please use `--planner-version` instead.
+- The deprecated flag `--master_connect_retry` has been removed. Please use `--replication_connect_retry` instead.
+- `vtctl` commands that take shard names and ranges as positional arguments (e.g. `vtctl Reshard ks.workflow -80 -40,40-80`) need to have their positional arguments separated from their flag arguments by a double-dash separator to avoid the new parsing library from mistaking them as flags (e.g. `vtctl Reshard ks.workflow -- -80 -40,40-80`).
+- The `--cell` flag in the `vtgate` binary no longer has a default value. It is a required argument that has to be specified for the binary to run. Please explicitly specify the flag, if dependent on the flag's default value.
+- The `--db-config-*-*` VTTablet flags were deprecated in `v3.0.0`. They have now been deleted as part of this release. You must use `--db_dba_*` now.
+
+#### vttablet Flag Deletions
+The following VTTablet flags were deprecated in 7.0. They have now been deleted
+- `--queryserver-config-message-conn-pool-size`
+- `--queryserver-config-message-conn-pool-prefill-parallelism`
+- `--client-found-rows-pool-size` A different existing flag `--queryserver-config-transaction-cap` will be used instead
+- `--transaction_shutdown_grace_period` Use `--shutdown_grace_period` instead
+- `--queryserver-config-max-dml-rows`
+- `--queryserver-config-allowunsafe-dmls`
+- `--pool-name-prefix`
+- `--enable-autocommit` Autocommit is always allowed
+
+#### Vindex Interface
+
+All the vindex interface methods are changed by adding `context.Context` as an input parameter.
+
+E.g:
+```go
+Map(vcursor VCursor, .... ) ....
+ To
+Map(ctx context.Context, vcursor VCursor, .... ) ....
+```
+
+This only affects users who have added their own custom vindex implementation.
+They are required to change their implementation with these new interface method expectations.
+
+### Deprecations
+
+#### LogStats Table and Keyspace deprecated
+
+Information about which tables are used was being reported through the `Keyspace` and `Table` fields on LogStats.
+For multi-table queries, this output can be confusing, so we have added `TablesUsed`, that is a string array, listing all tables and which keyspace they are on.
+`Keyspace` and `Table` fields are deprecated and will be removed in the v16 release of Vitess.
+
+#### Orchestrator Integration Deprecation
+
+Orchestrator integration in `vttablet` has been deprecated. It will continue to work in this release but is liable to be removed in future releases.
+Consider using VTOrc instead of Orchestrator as VTOrc goes GA in this release.
+
+#### Connection Pool Prefill
+
+The connection pool with prefilled connections has been removed. The pool now does lazy connection creation.
+
+#### InitShardPrimary Deprecation
+
+The vtctld command InitShardPrimary has been deprecated. Please use PlannedReparentShard instead.
+
+### Command-line syntax deprecations
+
+#### vttablet startup flag deletions
+The following VTTablet flags were deprecated in 7.0. They have now been deleted
+- --queryserver-config-message-conn-pool-size
+- --queryserver-config-message-conn-pool-prefill-parallelism
+- --client-found-rows-pool-size --queryserver-config-transaction-cap will be used instead
+- --transaction_shutdown_grace_period Use --shutdown_grace_period instead
+- --queryserver-config-max-dml-rows
+- --queryserver-config-allowunsafe-dmls
+- --pool-name-prefix
+- --enable-autocommit Autocommit is always allowed
+
+#### vttablet startup flag deprecations
+- `--enable-query-plan-field-caching` has been deprecated. It will be removed in v16.
+- `--enable_semi_sync` has been deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`
+- `--queryserver-config-pool-prefill-parallelism`, `--queryserver-config-stream-pool-prefill-parallelism` and `--queryserver-config-transaction-prefill-parallelism` have all been deprecated. They will be removed in v16.
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+#### vtbackup flag deprecations
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+### VTGate
+
+#### vtgate --mysql-server-pool-conn-read-buffers
+
+`--mysql-server-pool-conn-read-buffers` enables pooling of buffers used to read from incoming
+connections, similar to the way pooling happens for write buffers. Defaults to off.
+
+### VDiff v2
+
+#### Resume Workflow
+
+We introduced the ability to resume a VDiff2 workflow:
+```
+$ vtctlclient --server=localhost:15999 VDiff --v2 customer.commerce2customer resume 4c664dc2-eba9-11ec-9ef7-920702940ee0
+VDiff 4c664dc2-eba9-11ec-9ef7-920702940ee0 resumed on target shards, use show to view progress
+
+$ vtctlclient --server=localhost:15999 VDiff --v2 customer.commerce2customer show last
+
+VDiff Summary for customer.commerce2customer (4c664dc2-eba9-11ec-9ef7-920702940ee0)
+State: completed
+RowsCompared: 196
+HasMismatch: false
+StartedAt: 2022-06-26 22:44:29
+CompletedAt: 2022-06-26 22:44:31
+
+Use "--format=json" for more detailed output.
+
+$ vtctlclient --server=localhost:15999 VDiff --v2 --format=json customer.commerce2customer show last
+{
+ "Workflow": "commerce2customer",
+ "Keyspace": "customer",
+ "State": "completed",
+ "UUID": "4c664dc2-eba9-11ec-9ef7-920702940ee0",
+ "RowsCompared": 196,
+ "HasMismatch": false,
+ "Shards": "0",
+ "StartedAt": "2022-06-26 22:44:29",
+ "CompletedAt": "2022-06-26 22:44:31"
+}
+```
+
+We leverage this resume capability to automatically restart a VDiff2 workflow that encountered a retryable error.
+
+We also made a number of other enhancements like progress reporting and features needed to make it a full replacement for VDiff v1. You can see more details in the tracking ticket for the VDiff2 feature complete target: https://github.com/vitessio/vitess/issues/10494
+
+Now that VDiff v2 is feature complete in 15.0, we hope to make it GA in 16.0.
+
+Please see the VDiff2 [documentation](https://vitess.io/docs/15.0/reference/vreplication/vdiff2/) for additional information.
+
+### New command line flags and behavior
+
+#### vtctl GetSchema --table-schema-only
+
+The new flag `--table-schema-only` skips column introspection. `GetSchema` only returns general schema analysis, and specifically it includes the `CREATE TABLE|VIEW` statement in the `schema` field.
+
+#### Support for additional compressors and decompressors during backup & restore
+Backup/Restore now allow many more options for compression and decompression instead of relying on the default compressor (`pargzip`).
+There are some built-in compressors which you can use out-of-the-box. Users will need to evaluate which option works best for their
+use-case. Here are the flags that control this feature
+
+- `--compression-engine-name`
+- `--external-compressor`
+- `--external-decompressor`
+- `--external-compressor-extension`
+- `--compression-level`
+
+`--compression-engine-name` specifies the engine used for compression. It can have one of the following values
+
+- pargzip (Default)
+- pgzip
+- lz4
+- zstd
+- external
+
+If you want to use any of the built-in compressors, simply set one of the above values other than `external` for `--compression-engine-name`. The value
+specified in `--compression-engine-name` is saved in the backup MANIFEST, which is later read by the restore process to decide which
+engine to use for decompression. Default value for engine is 'pargzip'.
+
+If you would like to use a custom command or external tool for compression/decompression then you need to provide the full command with
+arguments to the `--external-compressor` and `--external-decompressor` flags. `--external-compressor-extension` flag also needs to be provided
+so that compressed files are created with the correct extension. If the external command is not using any of the built-in compression engines
+(i.e. pgzip, pargzip, lz4 or zstd) then you need to set `--compression-engine-name` to value 'external'.
+
+Please note that if you want to keep the current behavior then you don't need to provide any of these flags.
+You can read more about backup & restore [here](https://vitess.io/docs/15.0/user-guides/operating-vitess/backup-and-restore/).
+
+If you decided to switch from an external compressor to one of the built-in supported compressors (i.e. pgzip, pargzip, lz4 or zstd) at any point
+in the future, you will need to do it in two steps.
+
+- step #1, set `--external-compressor` and `--external-compressor-extension` flag values to empty and change `--compression-engine-name` to desired value.
+- Step #2, after at least one cycle of backup with new configuration, you can set `--external-decompressor` flag value to empty.
+
+The reason you cannot change all the values together is because the restore process will then have no way to find out which external decompressor
+should be used to process the previous backup. Please make sure you have thought out all possible scenarios for restore before transitioning from one
+compression engine to another.
+
+#### Independent OLAP and OLTP transactional timeouts
+
+`--queryserver-config-olap-transaction-timeout` specifies the timeout applied
+to a transaction created within an OLAP workload. The default value is `30`
+seconds, but this can be raised, lowered, or set to zero to disable the timeout
+altogether.
+
+Until now, while OLAP queries would bypass the query timeout, transactions
+created within an OLAP session would be rolled back
+`--queryserver-config-transaction-timeout` seconds after the transaction was
+started.
+
+As of now, OLTP and OLAP transaction timeouts can be configured independently of each
+other.
+
+The main use case is to run queries spanning a long period of time which
+require transactional guarantees such as consistency or atomicity.
+
+#### Support for specifying group information in calls to VTGate
+
+`--grpc-use-effective-groups` allows non-SSL callers to specify groups information for a caller.
+Until now, you could only specify the caller-id for the security context used to authorize queries.
+As of now, you can specify the principal of the caller, and any groups they belong to.
+
+### Online DDL changes
+
+#### Concurrent vitess migrations
+
+All Online DDL migrations using the `vitess` strategy are now eligible to run concurrently, given `--allow-concurrent` DDL strategy flag. Until now, only `CREATE`, `DROP` and `REVERT` migrations were eligible, and now `ALTER` migrations are supported, as well. The terms for `ALTER` migrations concurrency:
+
+- DDL strategy must be `vitess --allow-concurrent ...`
+- No two migrations can run concurrently on the same table
+- No two `ALTER`s will copy table data concurrently
+- A concurrent `ALTER` migration will not start if another `ALTER` is running and is not `ready_to_complete`
+
+The main use case is to run multiple concurrent migrations, all with `--postpone-completion`. All table-copy operations will run sequentially, but no migration will actually cut-over, and eventually all migrations will be `ready_to_complete`, continuously tailing the binary logs and keeping up-to-date. A quick and iterative `ALTER VITESS_MIGRATION '...' COMPLETE` sequence of commands will cut-over all migrations _closely together_ (though not atomically together).
+
+#### vtctl command changes.
+All `online DDL show` commands can now be run with a few additional parameters
+- `--order` , order migrations in the output by either ascending or descending order of their `id` fields.
+- `--skip` , skip specified number of migrations in the output.
+- `--limit` , limit results to a specified number of migrations in the output.
+
+#### New syntax
+
+The following is now supported:
+
+```sql
+ALTER VITESS_MIGRATION COMPLETE ALL
+```
+
+This works on all pending migrations (`queued`, `ready`, `running`) and internally issues a `ALTER VITESS_MIGRATION '' COMPLETE` for each one. The command is useful for completing multiple concurrent migrations (see above) that are open-ended (`--postpone-completion`).
+
+### Tablet Throttler
+
+#### API changes
+
+API endpoint `/debug/vars` now exposes throttler metrics, such as number of hits and errors per app per check type. Example:
+
+```shell
+$ curl -s http://127.0.0.1:15100/debug/vars | jq . | grep Throttler
+ "ThrottlerAggregatedMysqlSelf": 0.191718,
+ "ThrottlerAggregatedMysqlShard": 0.960054,
+ "ThrottlerCheckAnyError": 27,
+ "ThrottlerCheckAnyMysqlSelfError": 13,
+ "ThrottlerCheckAnyMysqlSelfTotal": 38,
+ "ThrottlerCheckAnyMysqlShardError": 14,
+ "ThrottlerCheckAnyMysqlShardTotal": 42,
+ "ThrottlerCheckAnyTotal": 80,
+ "ThrottlerCheckMysqlSelfSecondsSinceHealthy": 0,
+ "ThrottlerCheckMysqlShardSecondsSinceHealthy": 0,
+ "ThrottlerProbesLatency": 355523,
+ "ThrottlerProbesTotal": 74,
+```
+
+### Mysql Compatibility
+
+#### System Settings
+Vitess has had support for system settings from release 7.0 onwards, but this support came with some caveats.
+As soon as a client session changes a default system setting, a mysql connection gets reserved for it.
+This can sometimes lead to clients running out of mysql connections.
+Users were instructed to minimize the use of this feature and to try to set the desired system settings as defaults in the mysql config.
+
+With this release, Vitess can handle system settings changes in a much better way and clients can use them more freely.
+Vitess now has the ability to pool changed settings without reserving connections for any particular session.
+
+This feature can be enabled by setting `queryserver-enable-settings-pool` flag on the vttablet. It is disabled by default.
+In future releases, we will make this flag enabled by default.
+
+#### Lookup Vindexes
+
+Lookup vindexes now support a new parameter `multi_shard_autocommit`. If this is set to `true`, lookup vindex dml queries will be sent as autocommit to all shards instead of being wrapped in a transaction.
+This is different from the existing `autocommit` parameter where the query is sent in its own transaction separate from the ongoing transaction if any i.e. begin -> lookup query execs -> commit/rollback
+
+### Durability Policy
+
+#### Cross Cell
+
+A new durability policy `cross_cell` is now supported. `cross_cell` durability policy only allows replica tablets from a different cell than the current primary to
+send semi-sync ACKs. This ensures that any committed write exists in at least 2 tablets belonging to different cells.
+
+### New EXPLAIN format
+
+#### FORMAT=vtexplain
+
+With this new `explain` format, you can get an output that is very similar to the command line `vtexplain` app, but from a running `vtgate`, through a MySQL query.
+
+### VTOrc
+
+#### Old UI Removal and Replacement
+
+The old UI that VTOrc inherited from `Orchestrator` has been removed. A replacement UI, more consistent with the other Vitess binaries has been created.
+In order to use the new UI, `--port` flag has to be provided.
+
+Along with the UI, the old APIs have also been deprecated. However, some of them have been ported over to the new UI -
+
+| Old API | New API | Additional notes |
+|----------------------------------|----------------------------------|-----------------------------------------------------------------------|
+| `/api/problems` | `/api/problems` | The new API also supports filtering using the keyspace and shard name |
+| `/api/disable-global-recoveries` | `/api/disable-global-recoveries` | Functionally remains the same |
+| `/api/enable-global-recoveries` | `/api/enable-global-recoveries` | Functionally remains the same |
+| `/api/health` | `/debug/health` | Functionally remains the same |
+| `/api/replication-analysis` | `/api/replication-analysis` | Functionally remains the same. Output is now JSON format. |
+
+Apart from these APIs, we also now have `/debug/status`, `/debug/vars` and `/debug/liveness` available in the new UI.
+
+#### Configuration Refactor and New Flags
+
+Since VTOrc was forked from `Orchestrator`, it inherited a lot of configurations that don't make sense for the Vitess use-case.
+All of such configurations have been removed.
+
+VTOrc ignores the configurations that it doesn't understand. So old configurations can be kept around on upgrading and won't cause any issues.
+They will just be ignored.
+
+For all the configurations that are kept, flags have been added for them and the flags are the desired way to pass these configurations going forward.
+The config file will be deprecated and removed in upcoming releases. The following is a list of all the configurations that are kept and the associated flags added.
+
+| Configurations Kept | Flags Introduced |
+|:-------------------------------------:|:-------------------------------------:|
+| SQLite3DataFile | `--sqlite-data-file` |
+| InstancePollSeconds | `--instance-poll-time` |
+| SnapshotTopologiesIntervalHours | `--snapshot-topology-interval` |
+| ReasonableReplicationLagSeconds | `--reasonable-replication-lag` |
+| AuditLogFile | `--audit-file-location` |
+| AuditToSyslog                         | `--audit-to-syslog`                   |
+| AuditToBackendDB                      | `--audit-to-backend`                  |
+| AuditPurgeDays | `--audit-purge-duration` |
+| RecoveryPeriodBlockSeconds | `--recovery-period-block-duration` |
+| PreventCrossDataCenterPrimaryFailover | `--prevent-cross-cell-failover` |
+| LockShardTimeoutSeconds | `--lock-shard-timeout` |
+| WaitReplicasTimeoutSeconds | `--wait-replicas-timeout` |
+| TopoInformationRefreshSeconds | `--topo-information-refresh-duration` |
+| RecoveryPollSeconds | `--recovery-poll-duration` |
+
+Apart from configurations, some flags from VTOrc have also been removed -
+- `sibling`
+- `destination`
+- `discovery`
+- `skip-unresolve`
+- `skip-unresolve-check`
+- `noop`
+- `binlog`
+- `statement`
+- `grab-election`
+- `promotion-rule`
+- `skip-continuous-registration`
+- `enable-database-update`
+- `ignore-raft-setup`
+- `tag`
+
+The ideal way to ensure backward compatibility is to remove the flags listed above while on the previous release. Then upgrade VTOrc.
+After upgrading, remove the config file and instead pass the flags that are introduced.
+
+#### Example Upgrade
+
+If you are running VTOrc with the flags `--ignore-raft-setup --clusters_to_watch="ks/0" --config="path/to/config"` and the following configuration
+```json
+{
+ "Debug": true,
+ "ListenAddress": ":6922",
+ "MySQLTopologyUser": "orc_client_user",
+ "MySQLTopologyPassword": "orc_client_user_password",
+ "MySQLReplicaUser": "vt_repl",
+ "MySQLReplicaPassword": "",
+ "RecoveryPeriodBlockSeconds": 1,
+ "InstancePollSeconds": 1,
+ "PreventCrossDataCenterPrimaryFailover": true
+}
+```
+First drop the flag `--ignore-raft-setup` while on the previous release. So, you'll be running VTOrc with `--clusters_to_watch="ks/0" --config="path/to/config"` and the same configuration listed above.
+
+Now you can upgrade your VTOrc version continuing to use the same flags and configurations, and it will continue to work just the same. If you wish to use the new UI, then you can add the `--port` flag as well.
+
+After upgrading, you can drop the configuration entirely and use the new flags like `--clusters_to_watch="ks/0" --recovery-period-block-duration=1s --instance-poll-time=1s --prevent-cross-cell-failover`
+
+#### Default Configuration Files
+
+The default files that VTOrc searches for configurations in have also changed from `"/etc/orchestrator.conf.json", "conf/orchestrator.conf.json", "orchestrator.conf.json"` to
+`"/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json"`.
+
+### Flags Restructure
+
+#### Flags Diff
+
+In addition to these major streams of work in release-15.0, we have made tremendous progress on [VEP-4, aka The Flag Situation](https://github.com/vitessio/enhancements/blob/main/veps/vep-4.md), reorganizing our code so that Vitess binaries and their flags are
+clearly aligned in help text. An immediate win for usability, this positions us well to move on to a [viper](https://github.com/spf13/viper) implementation which will facilitate additional improvements including standardization of flag syntax and runtime configuration reloads.
+We are also aligning with industry standards regarding the use of flags, ensuring a seamless experience for users migrating from or integrating with other platforms.
+Below are the changes for each binary.
+- [mysqlctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctl.diff)
+- [mysqlctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctld.diff)
+- [vtaclcheck](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff)
+- [vtadmin](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtadmin.diff)
+- [vtctlclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctlclient.diff)
+- [vtctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctld.diff)
+- [vtctldclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctldclient.diff)
+- [vtexplain](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtexplain.diff)
+- [vtgate](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgate.diff)
+- [vtgtr](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgtr.diff)
+- [vtorc](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtorc.diff)
+- [vttablet](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttablet.diff)
+- [vttestserver](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttestserver.diff)
+- [vttlstest](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttlstest.diff)
+- [zk](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zk.diff)
+- [zkctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctl.diff)
+- [zkctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctld.diff)
+
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.0/changelog.md).
+
+The release includes 595 commits (excluding merges)
+
+Thanks to all our contributors: @Abirdcfly, @DeathBorn, @GuptaManan100, @K-Kumar-01, @L3o-pold, @Phanatic, @Weijun-H, @ajm188, @arthurschreiber, @arvind-murty, @brirams, @dbussink, @deepthi, @dependabot[bot], @doeg, @frouioui, @harshit-gangal, @mattlord, @maxenglander, @mgale, @notfelineit, @ofiriluz, @olyazavr, @quinox, @rafer, @renatolabs, @rohit-nayak-ps, @rsajwani, @rvrangel, @saunderst, @shlomi-noach, @systay, @vitess-bot[bot], @vmg, @yoheimuta
+
diff --git a/doc/releasenotes/15_0_0_summary.md b/changelog/15.0/15.0.0/summary.md
similarity index 58%
rename from doc/releasenotes/15_0_0_summary.md
rename to changelog/15.0/15.0.0/summary.md
index 0ca9ccee6c6..56b784b55dd 100644
--- a/doc/releasenotes/15_0_0_summary.md
+++ b/changelog/15.0/15.0.0/summary.md
@@ -1,23 +1,58 @@
## Summary
-- [Vindex Interface](#vindex-interface)
-- [LogStats Table and Keyspace deprecated](#logstats-table-and-keyspace-deprecated)
-- [Command-line syntax deprecations](#command-line-syntax-deprecations)
-- [New command line flags and behavior](#new-command-line-flags-and-behavior)
-- [Online DDL changes](#online-ddl-changes)
-- [Tablet throttler](#tablet-throttler)
-- [VDiff2](#vdiff2)
-- [Mysql Compatibility](#mysql-compatibility)
-- [Durability Policy](#durability-policy)
-- [New EXPLAIN format](#new-explain-format)
-
-## Known Issues
+- **[Known Issues](#known-issues)**
+- **[Breaking Changes](#breaking-changes)**
+ - [Flags](#flags)
+ - [VTTablet Flag Deletions](#vttablet-flag-deletions)
+ - [Vindex Interface](#vindex-interface)
+- **[Deprecations](#deprecations)**
+ - [LogStats Table and Keyspace Deprecated](#logstats-table-and-keyspace-deprecated)
+ - [Orchestrator Integration Deprecation](#orchestrator-integration-deprecation)
+ - [Connection Pool Prefill](#connection-pool-prefill)
+ - [InitShardPrimary Deprecation](#initshardprimary-deprecation)
+- **[Command-Line Syntax Deprecations](#command-line-syntax-deprecations)**
+ - [VTTablet Startup Flag Deletions](#vttablet-startup-flag-deletions)
+ - [VTTablet Startup Flag Deprecations](#vttablet-startup-flag-deprecations)
+ - [VTBackup Flag Deprecations](#vtbackup-flag-deprecations)
+- **[VTGate](#vtgate)**
+ - [vtgate --mysql-server-pool-conn-read-buffers](#vtgate--mysql-server-pool-conn-read-buffers)
+- **[VDiff2](#vdiff2)**
+ - [Resume Workflow](#resume-workflow)
+- **[New command line flags and behavior](#new-command-line-flags-and-behavior)**
+ - [vtctl GetSchema --table-schema-only](#vtctl-getschema--table-schema-only)
+  - [Support for Additional Compressors and Decompressors During Backup & Restore](#support-for-additional-compressors-and-decompressors-during-backup--restore)
+  - [Independent OLAP and OLTP Transactional Timeouts](#independent-olap-and-oltp-transactional-timeouts)
+ - [Support for Specifying Group Information in Calls to VTGate](#support-for-specifying-group-information-in-calls-to-vtgate)
+- **[Online DDL Changes](#online-ddl-changes)**
+ - [Concurrent Vitess Migrations](#concurrent-vitess-migrations)
+ - [VTCtl Command Changes](#vtctl-command-changes)
+ - [New Syntax](#new-syntax)
+- **[Tablet Throttler](#tablet-throttler)**
+ - [API Changes](#api-changes)
+- **[Mysql Compatibility](#mysql-compatibility)**
+ - [System Settings](#system-settings)
+ - [Lookup Vindexes](#lookup-vindexes)
+- **[Durability Policy](#durability-policy)**
+ - [Cross Cell](#cross-cell)
+- **[New EXPLAIN Format](#new-explain-format)**
+ - [FORMAT=vtexplain](#formatvtexplain)
+- **[VTOrc](#vtorc)**
+ - [Old UI Removal and Replacement](#old-ui-removal-and-replacement)
+  - [Configuration Refactor and New Flags](#configuration-refactor-and-new-flags)
+ - [Example Upgrade](#example-upgrade)
+ - [Default Configuration Files](#default-configuration-files)
+- **[Flags Restructure](#flags-restructure)**
+ - [Flags Diff](#flags-diff)
+
+## Known Issues
+
+- [Corrupted results for non-full-group-by queries with JOINs](https://github.com/vitessio/vitess/issues/11625). This can be resolved by using full-group-by queries.
## Major Changes
-### Breaking Changes
+### Breaking Changes
-#### Flags
+#### Flags
- The deprecated `--cpu_profile` flag has been removed. Please use the `--pprof` flag instead.
- The deprecated `--mem-profile-rate` flag has been removed. Please use `--pprof=mem` instead.
@@ -25,8 +60,21 @@
- The deprecated vtgate/vtexplain/vtcombo flag `--planner_version` has been removed. Please use `--planner-version` instead.
- The deprecated flag `--master_connect_retry` has been removed. Please use `--replication_connect_retry` instead.
- `vtctl` commands that take shard names and ranges as positional arguments (e.g. `vtctl Reshard ks.workflow -80 -40,40-80`) need to have their positional arguments separated from their flag arguments by a double-dash separator to avoid the new parsing library from mistaking them as flags (e.g. `vtctl Reshard ks.workflow -- -80 -40,40-80`).
+- The `--cell` flag in the `vtgate` binary no longer has a default value. It is a required argument that has to be specified for the binary to run. Please explicitly specify the flag, if dependent on the flag's default value.
+- The `--db-config-*-*` VTTablet flags were deprecated in `v3.0.0`. They have now been deleted as part of this release. You must use `--db_dba_*` now.
-#### Vindex Interface
+#### vttablet Flag Deletions
+The following VTTablet flags were deprecated in 7.0. They have now been deleted
+- `--queryserver-config-message-conn-pool-size`
+- `--queryserver-config-message-conn-pool-prefill-parallelism`
+- `--client-found-rows-pool-size` A different existing flag `--queryserver-config-transaction-cap` will be used instead
+- `--transaction_shutdown_grace_period` Use `--shutdown_grace_period` instead
+- `--queryserver-config-max-dml-rows`
+- `--queryserver-config-allowunsafe-dmls`
+- `--pool-name-prefix`
+- `--enable-autocommit` Autocommit is always allowed
+
+#### Vindex Interface
All the vindex interface methods are changed by adding `context.Context` as an input parameter.
@@ -40,26 +88,30 @@ Map(ctx context.Context, vcursor VCursor, .... ) ....
This only affects users who have added their own custom vindex implementation.
They are required to change their implementation with these new interface method expectations.
-#### LogStats Table and Keyspace deprecated
+### Deprecations
+
+#### LogStats Table and Keyspace deprecated
Information about which tables are used was being reported through the `Keyspace` and `Table` fields on LogStats.
For multi-table queries, this output can be confusing, so we have added `TablesUsed`, that is a string array, listing all tables and which keyspace they are on.
`Keyspace` and `Table` fields are deprecated and will be removed in the v16 release of Vitess.
-#### Orchestrator Integration Deprecation
+#### Orchestrator Integration Deprecation
Orchestrator integration in `vttablet` has been deprecated. It will continue to work in this release but is liable to be removed in future releases.
Consider using VTOrc instead of Orchestrator as VTOrc goes GA in this release.
-#### Connection Pool Prefill
+#### Connection Pool Prefill
The connection pool with prefilled connections have been removed. The pool now does lazy connection creation.
-Following flags are deprecated: `queryserver-config-pool-prefill-parallelism`, `queryserver-config-stream-pool-prefill-parallelism`, `queryserver-config-transaction-prefill-parallelism`
-and will be removed in future version.
-### Command-line syntax deprecations
+#### InitShardPrimary Deprecation
-#### vttablet startup flag deletions
+The vtctld command InitShardPrimary has been deprecated. Please use PlannedReparentShard instead.
+
+### Command-line syntax deprecations
+
+#### vttablet startup flag deletions
The following VTTablet flags were deprecated in 7.0. They have now been deleted
- --queryserver-config-message-conn-pool-size
- --queryserver-config-message-conn-pool-prefill-parallelism
@@ -70,19 +122,25 @@ The following VTTablet flags were deprecated in 7.0. They have now been deleted
- --pool-name-prefix
- --enable-autocommit Autocommit is always allowed
-#### vttablet startup flag deprecations
-- --enable-query-plan-field-caching is now deprecated. It will be removed in v16.
-- --enable_semi_sync is now deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`
-- --queryserver-config-pool-prefill-parallelism, --queryserver-config-stream-pool-prefill-parallelism and --queryserver-config-transaction-prefill-parallelism have all been deprecated. They will be removed in v16.
+#### vttablet startup flag deprecations
+- `--enable-query-plan-field-caching` has been deprecated. It will be removed in v16.
+- `--enable_semi_sync` has been deprecated. It will be removed in v16. Instead, set the correct durability policy using `SetKeyspaceDurabilityPolicy`
+- `--queryserver-config-pool-prefill-parallelism`, `--queryserver-config-stream-pool-prefill-parallelism` and `--queryserver-config-transaction-prefill-parallelism` have all been deprecated. They will be removed in v16.
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
+
+#### vtbackup flag deprecations
+- `--backup_storage_hook` has been deprecated, consider using one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.
-### New command line flags and behavior
+### VTGate
-#### vtgate --mysql-server-pool-conn-read-buffers
+#### vtgate --mysql-server-pool-conn-read-buffers
`--mysql-server-pool-conn-read-buffers` enables pooling of buffers used to read from incoming
connections, similar to the way pooling happens for write buffers. Defaults to off.
-### VDiff2
+### VDiff v2
+
+#### Resume Workflow
We introduced the ability to resume a VDiff2 workflow:
```
@@ -114,24 +172,30 @@ $ vtctlclient --server=localhost:15999 VDiff --v2 --format=json customer.commerc
}
```
+We leverage this resume capability to automatically restart a VDiff2 workflow that encountered a retryable error.
+
+We also made a number of other enhancements like progress reporting and features needed to make it a full replacement for VDiff v1. You can see more details in the tracking ticket for the VDiff2 feature complete target: https://github.com/vitessio/vitess/issues/10494
+
+Now that VDiff v2 is feature complete in 15.0, we hope to make it GA in 16.0.
+
Please see the VDiff2 [documentation](https://vitess.io/docs/15.0/reference/vreplication/vdiff2/) for additional information.
-### New command line flags and behavior
+### New command line flags and behavior
-#### vtctl GetSchema --table-schema-only
+#### vtctl GetSchema --table-schema-only
The new flag `--table-schema-only` skips column introspection. `GetSchema` only returns general schema analysis, and specifically it includes the `CREATE TABLE|VIEW` statement in the `schema` field.
-#### Support for additional compressors and decompressors during backup & restore
-Backup/Restore now allow you many more options for compression and decompression instead of relying on the default compressor(`pgzip`).
+#### Support for additional compressors and decompressors during backup & restore
+Backup/Restore now allow you many more options for compression and decompression instead of relying on the default compressor(`pargzip`).
There are some built-in compressors which you can use out-of-the-box. Users will need to evaluate which option works best for their
use-case. Here are the flags that control this feature
-- --compression-engine-name
-- --external-compressor
-- --external-decompressor
-- --external-compressor-extension
-- --compression-level
+- `--compression-engine-name`
+- `--external-compressor`
+- `--external-decompressor`
+- `--external-compressor-extension`
+- `--compression-level`
`--compression-engine-name` specifies the engine used for compression. It can have one of the following values
@@ -141,20 +205,19 @@ use-case. Here are the flags that control this feature
- zstd
- external
-where 'external' is set only when using a custom command or tool other than the ones that are already provided.
-If you want to use any of the built-in compressors, simply set one of the above values for `--compression-engine-name`. The value
+If you want to use any of the built-in compressors, simply set one of the above values other than `external` for `--compression-engine-name`. The value
specified in `--compression-engine-name` is saved in the backup MANIFEST, which is later read by the restore process to decide which
-engine to use for decompression. Default value for engine is 'pgzip'.
+engine to use for decompression. Default value for engine is 'pargzip'.
If you would like to use a custom command or external tool for compression/decompression then you need to provide the full command with
arguments to the `--external-compressor` and `--external-decompressor` flags. `--external-compressor-extension` flag also needs to be provided
so that compressed files are created with the correct extension. If the external command is not using any of the built-in compression engines
-(i-e pgzip, pargzip, lz4 or zstd) then you need to set `--compression-engine-name` to value 'external'.
+(i.e. pgzip, pargzip, lz4 or zstd) then you need to set `--compression-engine-name` to value 'external'.
-Please note that if you want the current production behavior then you don't need to change any of these flags.
+Please note that if you want to keep the current behavior then you don't need to provide any of these flags.
You can read more about backup & restore [here] (https://vitess.io/docs/15.0/user-guides/operating-vitess/backup-and-restore/).
-If you decided to switch from an external compressor to one of the built-in supported compressors (i-e pgzip, pargzip, lz4 or zstd) at any point
+If you decided to switch from an external compressor to one of the built-in supported compressors (i.e. pgzip, pargzip, lz4 or zstd) at any point
in the future, you will need to do it in two steps.
- step #1, set `--external-compressor` and `--external-compressor-extension` flag values to empty and change `--compression-engine-name` to desired value.
@@ -164,7 +227,7 @@ The reason you cannot change all the values together is because the restore proc
should be used to process the previous backup. Please make sure you have thought out all possible scenarios for restore before transitioning from one
compression engine to another.
-#### Independent OLAP and OLTP transactional timeouts
+#### Independent OLAP and OLTP transactional timeouts
`--queryserver-config-olap-transaction-timeout` specifies the timeout applied
to a transaction created within an OLAP workload. The default value is `30`
@@ -182,15 +245,15 @@ other.
The main use case is to run queries spanning a long period of time which
require transactional guarantees such as consistency or atomicity.
-#### Support for specifying group information in calls to VTGate
+#### Support for specifying group information in calls to VTGate
`--grpc-use-effective-groups` allows non-SSL callers to specify groups information for a caller.
Until now, you could only specify the caller-id for the security context used to authorize queries.
As of now, you can specify the principal of the caller, and any groups they belong to.
-### Online DDL changes
+### Online DDL changes
-#### Concurrent vitess migrations
+#### Concurrent vitess migrations
All Online DDL migrations using the `vitess` strategy are now eligible to run concurrently, given `--allow-concurrent` DDL strategy flag. Until now, only `CREATE`, `DROP` and `REVERT` migrations were eligible, and now `ALTER` migrations are supported, as well. The terms for `ALTER` migrations concurrency:
@@ -201,13 +264,13 @@ All Online DDL migrations using the `vitess` strategy are now eligible to run co
The main use case is to run multiple concurrent migrations, all with `--postpone-completion`. All table-copy operations will run sequentially, but no migration will actually cut-over, and eventually all migrations will be `ready_to_complete`, continuously tailing the binary logs and keeping up-to-date. A quick and iterative `ALTER VITESS_MIGRATION '...' COMPLETE` sequence of commands will cut-over all migrations _closely together_ (though not atomically together).
-#### vtctl command changes.
+#### vtctl command changes.
All `online DDL show` commands can now be run with a few additional parameters
- `--order` , order migrations in the output by either ascending or descending order of their `id` fields.
- `--skip` , skip specified number of migrations in the output.
- `--limit` , limit results to a specified number of migrations in the output.
-#### New syntax
+#### New syntax
The following is now supported:
@@ -217,9 +280,9 @@ ALTER VITESS_MIGRATION COMPLETE ALL
This works on all pending migrations (`queued`, `ready`, `running`) and internally issues a `ALTER VITESS_MIGRATION '' COMPLETE` for each one. The command is useful for completing multiple concurrent migrations (see above) that are open-ended (`--postpone-completion`).
-### Tablet throttler
+### Tablet Throttler
-#### API changes
+#### API changes
API endpoint `/debug/vars` now exposes throttler metrics, such as number of hits and errors per app per check type. Example:
@@ -239,41 +302,41 @@ $ curl -s http://127.0.0.1:15100/debug/vars | jq . | grep Throttler
"ThrottlerProbesTotal": 74,
```
-### Mysql Compatibility
+### Mysql Compatibility
-#### System Settings
-Vitess supported system settings from release 7.0 onwards, but it was always with a pinch of salt.
-As soon as a client session changes a default system setting, the mysql connection gets blocked for it.
-This leads to clients running out of mysql connections.
-The clients were instructed to use this to a minimum and try to set those changed system settings as default on the mysql.
+#### System Settings
+Vitess has had support for system settings from release 7.0 onwards, but this support came with some caveats.
+As soon as a client session changes a default system setting, a mysql connection gets reserved for it.
+This can sometimes lead to clients running out of mysql connections.
+Users were instructed to minimize the use of this feature and to try to set the desired system settings as defaults in the mysql config.
-With this release, Vitess can handle system settings changes in a much better way and the clients can use it more freely.
-Vitess now pools those changed settings and does not reserve it for any particular session.
+With this release, Vitess can handle system settings changes in a much better way and clients can use them more freely.
+Vitess now has the ability to pool changed settings without reserving connections for any particular session.
This feature can be enabled by setting `queryserver-enable-settings-pool` flag on the vttablet. It is disabled by default.
In future releases, we will make this flag enabled by default.
-#### Lookup Vindexes
+#### Lookup Vindexes
Lookup vindexes now support a new parameter `multi_shard_autocommit`. If this is set to `true`, lookup vindex dml queries will be sent as autocommit to all shards instead of being wrapped in a transaction.
This is different from the existing `autocommit` parameter where the query is sent in its own transaction separate from the ongoing transaction if any i.e. begin -> lookup query execs -> commit/rollback
-### Durability Policy
+### Durability Policy
-#### Cross Cell
+#### Cross Cell
A new durability policy `cross_cell` is now supported. `cross_cell` durability policy only allows replica tablets from a different cell than the current primary to
send semi-sync ACKs. This ensures that any committed write exists in at least 2 tablets belonging to different cells.
-### New EXPLAIN format
+### New EXPLAIN format
-#### FORMAT=vtexplain
+#### FORMAT=vtexplain
With this new `explain` format, you can get an output that is very similar to the command line `vtexplain` app, but from a running `vtgate`, through a MySQL query.
-### VTOrc
+### VTOrc
-#### Old UI Removal and Replacement
+#### Old UI Removal and Replacement
The old UI that VTOrc inherited from `Orchestrator` has been removed. A replacement UI, more consistent with the other Vitess binaries has been created.
In order to use the new UI, `--port` flag has to be provided.
@@ -290,7 +353,7 @@ Along with the UI, the old APIs have also been deprecated. However, some of them
Apart from these APIs, we also now have `/debug/status`, `/debug/vars` and `/debug/liveness` available in the new UI.
-#### Configuration Refactor and New Flags
+#### Configuration Refactor and New Flags
Since VTOrc was forked from `Orchestrator`, it inherited a lot of configurations that don't make sense for the Vitess use-case.
All of such configurations have been removed.
@@ -337,7 +400,7 @@ Apart from configurations, some flags from VTOrc have also been removed -
The ideal way to ensure backward compatibility is to remove the flags listed above while on the previous release. Then upgrade VTOrc.
After upgrading, remove the config file and instead pass the flags that are introduced.
-#### Example Upgrade
+#### Example Upgrade
If you are running VTOrc with the flags `--ignore-raft-setup --clusters_to_watch="ks/0" --config="path/to/config"` and the following configuration
```json
@@ -359,7 +422,33 @@ Now you can upgrade your VTOrc version continuing to use the same flags and conf
After upgrading, you can drop the configuration entirely and use the new flags like `--clusters_to_watch="ks/0" --recovery-period-block-duration=1s --instance-poll-time=1s --prevent-cross-cell-failover`
-#### Default Configuration Files
+#### Default Configuration Files
The default files that VTOrc searches for configurations in have also changed from `"/etc/orchestrator.conf.json", "conf/orchestrator.conf.json", "orchestrator.conf.json"` to
`"/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json"`.
+
+### Flags Restructure
+
+#### Flags Diff
+
+In addition to these major streams of work in release-15.0, we have made tremendous progress on [VEP-4, aka The Flag Situation](https://github.com/vitessio/enhancements/blob/main/veps/vep-4.md), reorganizing our code so that Vitess binaries and their flags are
+clearly aligned in help text. An immediate win for usability, this positions us well to move on to a [viper](https://github.com/spf13/viper) implementation which will facilitate additional improvements including standardization of flag syntax and runtime configuration reloads.
+We are also aligning with industry standards regarding the use of flags, ensuring a seamless experience for users migrating from or integrating with other platforms.
+Below are the changes for each binary.
+- [mysqlctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctl.diff)
+- [mysqlctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/mysqlctld.diff)
+- [vtaclcheck](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff)
+- [vtadmin](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtadmin.diff)
+- [vtctlclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctlclient.diff)
+- [vtctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctld.diff)
+- [vtctldclient](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtctldclient.diff)
+- [vtexplain](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtexplain.diff)
+- [vtgate](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgate.diff)
+- [vtgtr](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtgtr.diff)
+- [vtorc](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vtorc.diff)
+- [vttablet](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttablet.diff)
+- [vttestserver](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttestserver.diff)
+- [vttlstest](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/vttlstest.diff)
+- [zk](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zk.diff)
+- [zkctl](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctl.diff)
+- [zkctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctld.diff)
diff --git a/changelog/15.0/15.0.1/changelog.md b/changelog/15.0/15.0.1/changelog.md
new file mode 100644
index 00000000000..c44b69b5e8e
--- /dev/null
+++ b/changelog/15.0/15.0.1/changelog.md
@@ -0,0 +1,43 @@
+# Changelog of Vitess v15.0.1
+
+### Bug fixes
+#### Build/CI
+ * Docker Image Context Fix [#11628](https://github.com/vitessio/vitess/pull/11628)
+ * Addition of a CI tool to detect dead links in test/config.json [#11668](https://github.com/vitessio/vitess/pull/11668)
+ * Fix files changes filtering in CI [#11714](https://github.com/vitessio/vitess/pull/11714)
+#### General
+ * [release-15.0] Fix missing flag usage for vault credentials flags (#11582) [#11583](https://github.com/vitessio/vitess/pull/11583)
+ * fix vdiff release notes [#11595](https://github.com/vitessio/vitess/pull/11595)
+#### Query Serving
+ * collations: fix coercion semantics according to 8.0.31 changes [#11487](https://github.com/vitessio/vitess/pull/11487)
+ * [bugfix] Allow VTExplain to handle shards that are not active during resharding [#11640](https://github.com/vitessio/vitess/pull/11640)
+ * [release-15.0] Do not multiply `AggregateRandom` in JOINs [#11672](https://github.com/vitessio/vitess/pull/11672)
+ * [15.0] Send errors in stream instead of a grpc error from streaming rpcs when transaction or reserved connection is acquired [#11687](https://github.com/vitessio/vitess/pull/11687)
+ * improve handling of ORDER BY/HAVING rewriting [#11691](https://github.com/vitessio/vitess/pull/11691)
+ * [release-15.0] Accept no more data in session state change as ok (#11796) [#11800](https://github.com/vitessio/vitess/pull/11800)
+ * semantics: Use a BitSet [#11819](https://github.com/vitessio/vitess/pull/11819)
+#### VTAdmin
+ * Add VTAdmin folder to release package [#11683](https://github.com/vitessio/vitess/pull/11683)
+#### vtctl
+ * Switch ApplySchema `--sql` argument to be `StringArray` instead of `StringSlice` [#11790](https://github.com/vitessio/vitess/pull/11790)
+### CI/Build
+#### Build/CI
+ * [release-15.0] Remove Launchable in the workflows [#11669](https://github.com/vitessio/vitess/pull/11669)
+ * Update test runners to run all tests including outside package [#11787](https://github.com/vitessio/vitess/pull/11787)
+ * [release-15.0] Add automation to change vitess version in the docker-release script (#11682) [#11816](https://github.com/vitessio/vitess/pull/11816)
+#### Governance
+ * codeowners: have at least two for almost every package [#11639](https://github.com/vitessio/vitess/pull/11639)
+#### Query Serving
+ * [release-15.0] Consistent sorting in Online DDL Vrepl suite test (#11821) [#11828](https://github.com/vitessio/vitess/pull/11828)
+#### VReplication
+ * update jsonparser dependency [#11694](https://github.com/vitessio/vitess/pull/11694)
+### Release
+#### General
+ * Release of v15.0.0 [#11573](https://github.com/vitessio/vitess/pull/11573)
+ * Back to dev mode after v15.0.0 [#11574](https://github.com/vitessio/vitess/pull/11574)
+ * fix anchors for release notes and summary [#11578](https://github.com/vitessio/vitess/pull/11578)
+ * Mention the `--db-config-*-*` flag in the release notes [#11610](https://github.com/vitessio/vitess/pull/11610)
+### Testing
+#### Build/CI
+ * [release-15.0] Use `go1.19.3` in the upgrade/downgrade tests [#11676](https://github.com/vitessio/vitess/pull/11676)
+
diff --git a/changelog/15.0/15.0.1/release_notes.md b/changelog/15.0/15.0.1/release_notes.md
new file mode 100644
index 00000000000..1737f0fd2f8
--- /dev/null
+++ b/changelog/15.0/15.0.1/release_notes.md
@@ -0,0 +1,24 @@
+# Release of Vitess v15.0.1
+## Major Changes
+
+### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+
+### VTAdmin web folder is missing while installing Vitess with local method
+
+When we try to install Vitess locally (https://vitess.io/docs/15.0/get-started/local/#install-vitess) on `v15.0`, we are getting the following error
+```
+npm ERR! enoent ENOENT: no such file or directory, open '/home/web/vtadmin/package.json'
+```
+This issue is fixed in 15.0.1. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11679), and its fix [here](https://github.com/vitessio/vitess/pull/11683).
+
+------------
+
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.1/changelog.md).
+
+The release includes 25 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @frouioui, @harshit-gangal, @rsajwani, @vitess-bot[bot]
+
diff --git a/changelog/15.0/15.0.1/summary.md b/changelog/15.0/15.0.1/summary.md
new file mode 100644
index 00000000000..66a5763180f
--- /dev/null
+++ b/changelog/15.0/15.0.1/summary.md
@@ -0,0 +1,14 @@
+## Major Changes
+
+### Corrupted results for non-full-group-by queries with JOINs
+
+An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN
+is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633).
+
+### VTAdmin web folder is missing while installing Vitess with local method
+
+When we try to install Vitess locally (https://vitess.io/docs/15.0/get-started/local/#install-vitess) on `v15.0`, we are getting the following error
+```
+npm ERR! enoent ENOENT: no such file or directory, open '/home/web/vtadmin/package.json'
+```
+This issue is fixed in 15.0.1. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11679), and its fix [here](https://github.com/vitessio/vitess/pull/11683).
\ No newline at end of file
diff --git a/changelog/15.0/15.0.2/changelog.md b/changelog/15.0/15.0.2/changelog.md
new file mode 100644
index 00000000000..29aff8fa2f7
--- /dev/null
+++ b/changelog/15.0/15.0.2/changelog.md
@@ -0,0 +1,27 @@
+# Changelog of Vitess v15.0.2
+
+### Bug fixes
+#### Query Serving
+ * Online DDL: fix 'vtctlclient OnlineDDL' template queries [#11889](https://github.com/vitessio/vitess/pull/11889)
+ * Fix CheckMySQL by setting the correct wanted state [#11895](https://github.com/vitessio/vitess/pull/11895)
+ * bugfix: allow predicates without dependencies with derived tables to be handled correctly [#11911](https://github.com/vitessio/vitess/pull/11911)
+ * [release-15.0] Fix sending a ServerLost error when reading a packet fails (#11920) [#11930](https://github.com/vitessio/vitess/pull/11930)
+ * Skip `TestSubqueriesExists` during upgrade-downgrade tests [#11953](https://github.com/vitessio/vitess/pull/11953)
+#### VReplication
+ * VReplication: Prevent Orphaned VDiff2 Jobs [#11768](https://github.com/vitessio/vitess/pull/11768)
+### CI/Build
+#### Build/CI
+ * Fix deprecated usage of set-output [#11844](https://github.com/vitessio/vitess/pull/11844)
+ * Use `go1.19.4` in the next release upgrade downgrade E2E tests [#11924](https://github.com/vitessio/vitess/pull/11924)
+#### TabletManager
+ * Fix closing the body for HTTP requests [#11842](https://github.com/vitessio/vitess/pull/11842)
+### Enhancement
+#### General
+ * Upgrade to `go1.18.9` [#11897](https://github.com/vitessio/vitess/pull/11897)
+### Release
+#### General
+ * Release of v15.0.1 [#11847](https://github.com/vitessio/vitess/pull/11847)
+ * Back to dev mode after v15.0.1 [#11848](https://github.com/vitessio/vitess/pull/11848)
+ * updating summary and release notes for v15.0.1 [#11852](https://github.com/vitessio/vitess/pull/11852)
+ * Update the release `15.0.2` summary doc [#11954](https://github.com/vitessio/vitess/pull/11954)
+
diff --git a/changelog/15.0/15.0.2/release_notes.md b/changelog/15.0/15.0.2/release_notes.md
new file mode 100644
index 00000000000..33ece0e1c73
--- /dev/null
+++ b/changelog/15.0/15.0.2/release_notes.md
@@ -0,0 +1,20 @@
+# Release of Vitess v15.0.2
+## Major Changes
+
+### Upgrade to `go1.18.9`
+
+Vitess `v15.0.2` now runs on `go1.18.9`.
+The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fix to the `net/http` package, which is used extensively by Vitess.
+Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU).
+
+> go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages.
+
+
+------------
+
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.2/changelog.md).
+
+The release includes 14 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @dbussink, @frouioui, @mattlord, @rsajwani, @shlomi-noach, @vitess-bot[bot]
+
diff --git a/changelog/15.0/15.0.2/summary.md b/changelog/15.0/15.0.2/summary.md
new file mode 100644
index 00000000000..b12a97879a5
--- /dev/null
+++ b/changelog/15.0/15.0.2/summary.md
@@ -0,0 +1,10 @@
+## Major Changes
+
+### Upgrade to `go1.18.9`
+
+Vitess `v15.0.2` now runs on `go1.18.9`.
+The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fix to `net/http` package, which is used extensively by Vitess.
+Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU).
+
+> go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages.
+
diff --git a/changelog/15.0/15.0.3/changelog.md b/changelog/15.0/15.0.3/changelog.md
new file mode 100644
index 00000000000..5634b7aa24a
--- /dev/null
+++ b/changelog/15.0/15.0.3/changelog.md
@@ -0,0 +1,89 @@
+# Changelog of Vitess v15.0.3
+
+### Bug fixes
+#### Backup and Restore
+ * mysqlctl: flags should be added to vtbackup [#12048](https://github.com/vitessio/vitess/pull/12048)
+#### Build/CI
+ * Fix `codeql` workflow timeout issue [#11760](https://github.com/vitessio/vitess/pull/11760)
+ * [release-15.0] Use `go1.20.1` in upgrade/downgrade tests [#12512](https://github.com/vitessio/vitess/pull/12512)
+#### CLI
+ * Purge logs without panicking [#12187](https://github.com/vitessio/vitess/pull/12187)
+ * Fix `vtctldclient`'s Root command to return an error on unknown command [#12481](https://github.com/vitessio/vitess/pull/12481)
+#### Cluster management
+ * Skip `TestReparentDoesntHangIfPrimaryFails` in vttablet v16 and above [#12387](https://github.com/vitessio/vitess/pull/12387)
+ * Fix initialization code to also stop replication to prevent crash [#12534](https://github.com/vitessio/vitess/pull/12534)
+#### Observability
+ * Reset the current lag when closing the replication lag reader. [#12683](https://github.com/vitessio/vitess/pull/12683)
+#### Online DDL
+ * Bugfix/Backport to v15: Fix schema migrations requested_timestamp zero values [#12263](https://github.com/vitessio/vitess/pull/12263)
+ * Mysqld.GetSchema: tolerate tables being dropped while inspecting schema [#12641](https://github.com/vitessio/vitess/pull/12641)
+#### Operator
+ * Fix rbac config in the vtop example [#12034](https://github.com/vitessio/vitess/pull/12034)
+#### Query Serving
+ * [release-15.0] only expand when we have full information (#11998) [#12002](https://github.com/vitessio/vitess/pull/12002)
+ * Fix: Date math with Interval keyword [#12082](https://github.com/vitessio/vitess/pull/12082)
+ * BugFix: Cast expression translation by evaluation engine [#12111](https://github.com/vitessio/vitess/pull/12111)
+ * [Gen4] Fix lookup vindexes with `autocommit` enabled [#12172](https://github.com/vitessio/vitess/pull/12172)
+ * VTGate: Ensure HealthCheck Cache Secondary Maps Stay in Sync With Authoritative Map on Tablet Delete [#12178](https://github.com/vitessio/vitess/pull/12178)
+ * Fix aggregation on outer joins [#12298](https://github.com/vitessio/vitess/pull/12298)
+ * [release-15.0] fix: added null safe operator precendence rule (#12297) [#12306](https://github.com/vitessio/vitess/pull/12306)
+ * [release-15.0] Fix bug in vtexplain around JOINs (#12376) [#12383](https://github.com/vitessio/vitess/pull/12383)
+ * Fix scalar aggregation engine primitive for column truncation [#12468](https://github.com/vitessio/vitess/pull/12468)
+ * [release-16.0] BugFix: Unsharded query using a derived table and a dual table [#12484](https://github.com/vitessio/vitess/pull/12484)
+ * [bug fix] USING planning on information_schema [#12542](https://github.com/vitessio/vitess/pull/12542)
+ * handle filter on top of UNION [#12543](https://github.com/vitessio/vitess/pull/12543)
+ * collations: fix sorting in UCA900 collations [#12555](https://github.com/vitessio/vitess/pull/12555)
+ * VSchema DDL: Add grammar to accept qualified table names in Vindex option values [#12577](https://github.com/vitessio/vitess/pull/12577)
+ * [release-15.0] `ApplyVSchemaDDL`: escape Sequence names when writing the VSchema (#12519) [#12598](https://github.com/vitessio/vitess/pull/12598)
+ * [gen4 planner] Make sure to not push down expressions when not possible [#12607](https://github.com/vitessio/vitess/pull/12607)
+ * Fix `panic` when executing a prepare statement with over `65,528` parameters [#12614](https://github.com/vitessio/vitess/pull/12614)
+ * [planner bugfix] add expressions to HAVING [#12668](https://github.com/vitessio/vitess/pull/12668)
+ * Use a left join to make sure that tables with tablespace=innodb_system are included in the schema [#12672](https://github.com/vitessio/vitess/pull/12672)
+ * [release-15.0] Always add columns in the `Derived` operator [#12680](https://github.com/vitessio/vitess/pull/12680)
+ * [planner fix] make unknown column an error only for sharded queries [#12704](https://github.com/vitessio/vitess/pull/12704)
+#### VReplication
+ * VReplication Last Error: retry error if it happens after timeout [#12114](https://github.com/vitessio/vitess/pull/12114)
+#### VTorc
+ * Fix unhandled error in VTOrc `recoverDeadPrimary` [#12511](https://github.com/vitessio/vitess/pull/12511)
+### CI/Build
+#### Build/CI
+ * [release-15.0] Make upgrade downgrade job names unique [#12498](https://github.com/vitessio/vitess/pull/12498)
+ * v15 backport: CI: increase overall test timeouts for all OnlineDDL tests [#12591](https://github.com/vitessio/vitess/pull/12591)
+#### Online DDL
+ * CI: extend timeouts in onlineddl_vrepl due to slow CI runners [#12583](https://github.com/vitessio/vitess/pull/12583)
+#### Query Serving
+ * [release-15.0] Flakes: Properly Test HealthCheck Cache Response Handling (#12226) [#12227](https://github.com/vitessio/vitess/pull/12227)
+### Dependabot
+#### Build/CI
+ * Bump golang.org/x/net from 0.5.0 to 0.7.0 (#12390) [#12405](https://github.com/vitessio/vitess/pull/12405)
+### Enhancement
+#### Build/CI
+ * Auto upgrade the Golang version [#12585](https://github.com/vitessio/vitess/pull/12585)
+#### Governance
+ * [release-15.0] Add manan and florent to Docker files CODEOWNERS (#11981) [#11983](https://github.com/vitessio/vitess/pull/11983)
+#### VTorc
+ * Release-15: Cherry pick vtorc no cgo [#12223](https://github.com/vitessio/vitess/pull/12223)
+### Internal Cleanup
+#### Build/CI
+ * [15.0] CI: remove pitrtls test [#12064](https://github.com/vitessio/vitess/pull/12064)
+#### General
+ * Remove removed flags from being used for v16+ binaries [#12128](https://github.com/vitessio/vitess/pull/12128)
+ * [release-15.0] Fix release script for the version in the docker script [#12285](https://github.com/vitessio/vitess/pull/12285)
+### Other
+#### Other
+ * Code freeze of release-15.0 [#12764](https://github.com/vitessio/vitess/pull/12764)
+### Performance
+#### Cluster management
+ * Bug fix: Cache filtered out tablets in topology watcher to avoid unnecessary GetTablet calls to topo [#12194](https://github.com/vitessio/vitess/pull/12194)
+### Release
+#### Build/CI
+ * [release-15.0] Tooling improvements backports [#12527](https://github.com/vitessio/vitess/pull/12527)
+#### Documentation
+ * Re-organize the `releasenotes` directory into `changelog` [#12566](https://github.com/vitessio/vitess/pull/12566)
+#### General
+ * Release of v15.0.2 [#11961](https://github.com/vitessio/vitess/pull/11961)
+ * Back to dev mode after v15.0.2 [#11962](https://github.com/vitessio/vitess/pull/11962)
+### Testing
+#### General
+ * Fix vtbackup upgrade/downgrade test [#12437](https://github.com/vitessio/vitess/pull/12437)
+
diff --git a/changelog/15.0/15.0.3/release_notes.md b/changelog/15.0/15.0.3/release_notes.md
new file mode 100644
index 00000000000..aabb3770528
--- /dev/null
+++ b/changelog/15.0/15.0.3/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v15.0.3
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.3/changelog.md).
+
+The release includes 52 commits (excluding merges)
+
+Thanks to all our contributors: @GuptaManan100, @ajm188, @dbussink, @deepthi, @frouioui, @harshit-gangal, @mattlord, @rsajwani, @shlomi-noach, @systay, @vitess-bot[bot], @vmg
+
diff --git a/changelog/15.0/15.0.4/changelog.md b/changelog/15.0/15.0.4/changelog.md
new file mode 100644
index 00000000000..f70fd1090a7
--- /dev/null
+++ b/changelog/15.0/15.0.4/changelog.md
@@ -0,0 +1,61 @@
+# Changelog of Vitess v15.0.4
+
+### Bug fixes
+#### Build/CI
+ * [release-15.0] Small fixes to the auto-upgrade golang tool (#12838) [#12847](https://github.com/vitessio/vitess/pull/12847)
+ * [release-15.0] Add timeout to golangci-lint and bump its version (#12852) [#12853](https://github.com/vitessio/vitess/pull/12853)
+ * [release-15.0] Remove recent golangci-lint version bump [#12910](https://github.com/vitessio/vitess/pull/12910)
+#### Cluster management
+ * [release-15.0] Prevent resetting replication every time we set replication source (#13377) [#13393](https://github.com/vitessio/vitess/pull/13393)
+ * [release-15.0] Don't run any reparent commands if the host is empty (#13396) [#13403](https://github.com/vitessio/vitess/pull/13403)
+ * [release-15.0] ignore all error for views in engine reload (#13590) [#13592](https://github.com/vitessio/vitess/pull/13592)
+#### Examples
+ * [release-15.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13471](https://github.com/vitessio/vitess/pull/13471)
+#### Online DDL
+ * v15 backport: vitess Online DDL atomic cut-over [#13376](https://github.com/vitessio/vitess/pull/13376)
+#### Query Serving
+ * [release-15.0] planbuilder bugfix - do not push aggregations into derived tables [#12824](https://github.com/vitessio/vitess/pull/12824)
+ * [release-15.0] Fix `vtgate_schema_tracker` flaky tests (#12780) [#12850](https://github.com/vitessio/vitess/pull/12850)
+ * [release-15.0] fix: union distinct between unsharded route and sharded join (#12968) [#12982](https://github.com/vitessio/vitess/pull/12982)
+ * gen4 planner: allow last_insert_id with arguments (15.0) [#13035](https://github.com/vitessio/vitess/pull/13035)
+ * [release-15.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13086](https://github.com/vitessio/vitess/pull/13086)
+ * [release-15.0] Remove indentation limit in the sqlparser (#13158) [#13167](https://github.com/vitessio/vitess/pull/13167)
+ * [release-15.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error (#13193) [#13196](https://github.com/vitessio/vitess/pull/13196)
+ * [15.0] Fix: errant GTID in health streamer (#13184) [#13226](https://github.com/vitessio/vitess/pull/13226)
+#### Schema Tracker
+ * [release-15.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13425](https://github.com/vitessio/vitess/pull/13425)
+ * Backport v15: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13457](https://github.com/vitessio/vitess/pull/13457)
+### Enhancement
+#### Build/CI
+ * Use go1.20.3 in the upgrade downgrade tests [#12839](https://github.com/vitessio/vitess/pull/12839)
+ * [release-15.0] Set the number of threads for release notes generation with a flag [#13315](https://github.com/vitessio/vitess/pull/13315)
+#### General
+ * Use `go1.20.4` on `release-15.0` upgrade test [#13071](https://github.com/vitessio/vitess/pull/13071)
+#### Query Serving
+ * [release-15.0] planner fix: scoping rules for JOIN ON expression inside a subquery [#12890](https://github.com/vitessio/vitess/pull/12890)
+### Internal Cleanup
+#### Operator
+ * Use vitess-operator `v2.8.4` in the examples [#12993](https://github.com/vitessio/vitess/pull/12993)
+#### VTorc
+ * [release-15.0] Remove excessive logging in VTOrc APIs (#13459) [#13463](https://github.com/vitessio/vitess/pull/13463)
+### Performance
+#### TabletManager
+ * [release-15.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13388](https://github.com/vitessio/vitess/pull/13388)
+### Release
+#### Build/CI
+ * [release-15.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13620](https://github.com/vitessio/vitess/pull/13620)
+#### Documentation
+ * Prepare release note `v15.0.4` [#13619](https://github.com/vitessio/vitess/pull/13619)
+### Testing
+#### Build/CI
+ * [release-15.0] fakedbclient: Add locking to avoid races (#12814) [#12821](https://github.com/vitessio/vitess/pull/12821)
+#### Cluster management
+ * [release-15.0] Flaky tests: Fix wrangler tests (#13568) [#13570](https://github.com/vitessio/vitess/pull/13570)
+#### General
+ * [release-15.0] Update Upgrade/Downgrade tests to use `go1.20.5` [#13271](https://github.com/vitessio/vitess/pull/13271)
+#### Query Serving
+ * [release-15.0] Fix benchmarks in `plan_test.go` (#13096) [#13125](https://github.com/vitessio/vitess/pull/13125)
+ * [release-15.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13502](https://github.com/vitessio/vitess/pull/13502)
+#### VTorc
+ * [release-15.0]: Fix flakiness in VTOrc tests (#13489) [#13529](https://github.com/vitessio/vitess/pull/13529)
+
diff --git a/changelog/15.0/15.0.4/release_notes.md b/changelog/15.0/15.0.4/release_notes.md
new file mode 100644
index 00000000000..38fa25f9c78
--- /dev/null
+++ b/changelog/15.0/15.0.4/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v15.0.4
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.4/changelog.md).
+
+The release includes 33 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay
+
diff --git a/changelog/15.0/15.0.4/summary.md b/changelog/15.0/15.0.4/summary.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/changelog/15.0/15.0.5/changelog.md b/changelog/15.0/15.0.5/changelog.md
new file mode 100644
index 00000000000..07ac0749677
--- /dev/null
+++ b/changelog/15.0/15.0.5/changelog.md
@@ -0,0 +1,40 @@
+# Changelog of Vitess v15.0.5
+
+### Bug fixes
+#### Build/CI
+ * [release-15.0] Make `Static Code Checks Etc` fail if the `./changelog` folder is out-of-date (#14003) [#14004](https://github.com/vitessio/vitess/pull/14004)
+ * [release-15.0] Enable failures in `tools/e2e_test_race.sh` and fix races (#13654) [#14009](https://github.com/vitessio/vitess/pull/14009)
+#### Cluster management
+ * [release-15.0] Fix `BackupShard` to get its options from its own flags (#13813) [#13818](https://github.com/vitessio/vitess/pull/13818)
+#### Online DDL
+ * v15 backport: Solve RevertMigration.Comment read/write concurrency issue [#13735](https://github.com/vitessio/vitess/pull/13735)
+ * v15 backport: Onlineddl: formalize "immediate operations", respect --postpone-completion strategy flag [#13832](https://github.com/vitessio/vitess/pull/13832)
+ * [release-15.0] OnlineDDL: fix nil 'completed_timestamp' for cancelled migrations (#13928) [#13935](https://github.com/vitessio/vitess/pull/13935)
+ * [release-15.0] OnlineDDL: cleanup cancelled migration artifacts; support `--retain-artifacts=` DDL strategy flag (#14029) [#14035](https://github.com/vitessio/vitess/pull/14035)
+ * [release-15.0] bugfix: change column name and type to json (#14093) [#14115](https://github.com/vitessio/vitess/pull/14115)
+#### Query Serving
+ * [release-15.0] vtgate: fix race condition iterating tables and views from schema tracker (#13673) [#13794](https://github.com/vitessio/vitess/pull/13794)
+ * [15.0] bugfixes: collection of fixes to bugs found while fuzzing [#13807](https://github.com/vitessio/vitess/pull/13807)
+ * [release-15.0] fix data race in join engine primitive olap streaming mode execution (#14012) [#14014](https://github.com/vitessio/vitess/pull/14014)
+ * [release-15.0] fix: cost to include subshard opcode (#14023) [#14025](https://github.com/vitessio/vitess/pull/14025)
+#### TabletManager
+ * [release-15.0] Fix: convertBoolToSemiSyncAction method to account for all semi sync actions (#13075) [#13957](https://github.com/vitessio/vitess/pull/13957)
+### CI/Build
+#### Documentation
+ * [release-15.0] update docgen to embed commit ID in autogenerated doc frontmatter (#14056) [#14072](https://github.com/vitessio/vitess/pull/14072)
+#### VTorc
+ * [release-15.0] docker: add dedicated vtorc container (#14126) [#14145](https://github.com/vitessio/vitess/pull/14145)
+### Documentation
+#### Documentation
+ * [release-15.0] anonymize homedirs in generated docs (#14101) [#14104](https://github.com/vitessio/vitess/pull/14104)
+### Internal Cleanup
+#### Build/CI
+ * [release-15.0] Bump upgrade test to `go1.20.8` [#13938](https://github.com/vitessio/vitess/pull/13938)
+### Release
+#### General
+ * Code freeze of release-15.0 for `v15.0.5` [#14141](https://github.com/vitessio/vitess/pull/14141)
+### Testing
+#### Build/CI
+ * [release-15.0] Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) [#14118](https://github.com/vitessio/vitess/pull/14118)
+ * [release-15.0] Remove FOSSA Test from CI until we can do it in a secure way (#14119) [#14120](https://github.com/vitessio/vitess/pull/14120)
+
diff --git a/changelog/15.0/15.0.5/release_notes.md b/changelog/15.0/15.0.5/release_notes.md
new file mode 100644
index 00000000000..34f9da437bd
--- /dev/null
+++ b/changelog/15.0/15.0.5/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v15.0.5
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.5/changelog.md).
+
+The release includes 20 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @arthurschreiber, @frouioui, @shlomi-noach, @systay
+
diff --git a/changelog/15.0/README.md b/changelog/15.0/README.md
new file mode 100644
index 00000000000..cc4a6caa0f0
--- /dev/null
+++ b/changelog/15.0/README.md
@@ -0,0 +1,25 @@
+## v15.0
+The dedicated team for this release can be found [here](team.md).
+* **[15.0.5](15.0.5)**
+ * [Changelog](15.0.5/changelog.md)
+ * [Release Notes](15.0.5/release_notes.md)
+
+* **[15.0.4](15.0.4)**
+ * [Changelog](15.0.4/changelog.md)
+ * [Release Notes](15.0.4/release_notes.md)
+
+* **[15.0.3](15.0.3)**
+ * [Changelog](15.0.3/changelog.md)
+ * [Release Notes](15.0.3/release_notes.md)
+
+* **[15.0.2](15.0.2)**
+ * [Changelog](15.0.2/changelog.md)
+ * [Release Notes](15.0.2/release_notes.md)
+
+* **[15.0.1](15.0.1)**
+ * [Changelog](15.0.1/changelog.md)
+ * [Release Notes](15.0.1/release_notes.md)
+
+* **[15.0.0](15.0.0)**
+ * [Changelog](15.0.0/changelog.md)
+ * [Release Notes](15.0.0/release_notes.md)
diff --git a/changelog/15.0/team.md b/changelog/15.0/team.md
new file mode 100644
index 00000000000..b8cbdf809bf
--- /dev/null
+++ b/changelog/15.0/team.md
@@ -0,0 +1,5 @@
+## Release Team for v15
+
+- **Lead:** Rameez Sajwani ([rsajwani](https://github.com/rsajwani)) rameez@planetscale.com
+- **Shadow:** Manan Gupta ([GuptaManan100](https://github.com/GuptaManan100)) manan@planetscale.com
+- **Mentor:** Florent Poinsard ([frouioui](https://github.com/frouioui)) florent@planetscale.com
\ No newline at end of file
diff --git a/doc/releasenotes/7_0_0_release_notes.md b/changelog/7.0/7.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_0_release_notes.md
rename to changelog/7.0/7.0.0/release_notes.md
diff --git a/doc/releasenotes/7_0_1_release_notes.md b/changelog/7.0/7.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_1_release_notes.md
rename to changelog/7.0/7.0.1/release_notes.md
diff --git a/doc/releasenotes/7_0_2_release_notes.md b/changelog/7.0/7.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_2_release_notes.md
rename to changelog/7.0/7.0.2/release_notes.md
diff --git a/doc/releasenotes/7_0_3_release_notes.md b/changelog/7.0/7.0.3/release_notes.md
similarity index 100%
rename from doc/releasenotes/7_0_3_release_notes.md
rename to changelog/7.0/7.0.3/release_notes.md
diff --git a/changelog/7.0/README.md b/changelog/7.0/README.md
new file mode 100644
index 00000000000..7177c6be673
--- /dev/null
+++ b/changelog/7.0/README.md
@@ -0,0 +1,12 @@
+## v7.0
+* **[7.0.3](7.0.3)**
+ * [Release Notes](7.0.3/release_notes.md)
+
+* **[7.0.2](7.0.2)**
+ * [Release Notes](7.0.2/release_notes.md)
+
+* **[7.0.1](7.0.1)**
+ * [Release Notes](7.0.1/release_notes.md)
+
+* **[7.0.0](7.0.0)**
+ * [Release Notes](7.0.0/release_notes.md)
diff --git a/doc/releasenotes/8_0_0_release_notes.md b/changelog/8.0/8.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/8_0_0_release_notes.md
rename to changelog/8.0/8.0.0/release_notes.md
diff --git a/changelog/8.0/README.md b/changelog/8.0/README.md
new file mode 100644
index 00000000000..fa359e7302f
--- /dev/null
+++ b/changelog/8.0/README.md
@@ -0,0 +1,3 @@
+## v8.0
+* **[8.0.0](8.0.0)**
+ * [Release Notes](8.0.0/release_notes.md)
diff --git a/doc/releasenotes/9_0_0_release_notes.md b/changelog/9.0/9.0.0/release_notes.md
similarity index 100%
rename from doc/releasenotes/9_0_0_release_notes.md
rename to changelog/9.0/9.0.0/release_notes.md
diff --git a/doc/releasenotes/9_0_1_release_notes.md b/changelog/9.0/9.0.1/release_notes.md
similarity index 100%
rename from doc/releasenotes/9_0_1_release_notes.md
rename to changelog/9.0/9.0.1/release_notes.md
diff --git a/doc/releasenotes/9_0_2_release_notes.md b/changelog/9.0/9.0.2/release_notes.md
similarity index 100%
rename from doc/releasenotes/9_0_2_release_notes.md
rename to changelog/9.0/9.0.2/release_notes.md
diff --git a/changelog/9.0/README.md b/changelog/9.0/README.md
new file mode 100644
index 00000000000..17f49aa3b47
--- /dev/null
+++ b/changelog/9.0/README.md
@@ -0,0 +1,9 @@
+## v9.0
+* **[9.0.2](9.0.2)**
+ * [Release Notes](9.0.2/release_notes.md)
+
+* **[9.0.1](9.0.1)**
+ * [Release Notes](9.0.1/release_notes.md)
+
+* **[9.0.0](9.0.0)**
+ * [Release Notes](9.0.0/release_notes.md)
diff --git a/changelog/README.md b/changelog/README.md
new file mode 100644
index 00000000000..7f7147b6fb1
--- /dev/null
+++ b/changelog/README.md
@@ -0,0 +1,10 @@
+## Releases
+* [15.0](15.0)
+* [14.0](14.0)
+* [13.0](13.0)
+* [12.0](12.0)
+* [11.0](11.0)
+* [10.0](10.0)
+* [9.0](9.0)
+* [8.0](8.0)
+* [7.0](7.0)
\ No newline at end of file
diff --git a/config/tablet/default.yaml b/config/tablet/default.yaml
index ad88e320871..3321e3f17ff 100644
--- a/config/tablet/default.yaml
+++ b/config/tablet/default.yaml
@@ -117,6 +117,7 @@ cacheResultFields: true # enable-query-plan-field-caching
# enable-tx-throttler
# tx-throttler-config
# tx-throttler-healthcheck-cells
+# tx-throttler-tablet-types
# enable_transaction_limit
# enable_transaction_limit_dry_run
# transaction_limit_per_user
diff --git a/doc/ReplicationLagBasedThrottlingOfTransactions.md b/doc/ReplicationLagBasedThrottlingOfTransactions.md
index ad1d98b151f..68686d4f72f 100644
--- a/doc/ReplicationLagBasedThrottlingOfTransactions.md
+++ b/doc/ReplicationLagBasedThrottlingOfTransactions.md
@@ -30,7 +30,13 @@ If this is not specified a [default](https://github.com/vitessio/vitess/tree/mai
* *tx-throttler-healthcheck-cells*
A comma separated list of datacenter cells. The throttler will only monitor
-the non-RDONLY replicas found in these cells for replication lag.
+the replicas found in these cells for replication lag.
+
+* *tx-throttler-tablet-types*
+
+A comma separated list of tablet types. The throttler will only monitor tablets
+with these types. Only `replica` and/or `rdonly` types are supported. The default
+is `replica`.
# Caveats and Known Issues
* The throttler keeps trying to explore the maximum rate possible while keeping
@@ -39,4 +45,3 @@ lag limit may occasionally be slightly violated.
* Transactions are considered homogeneous. There is currently no support
for specifying how `expensive` a transaction is.
-
diff --git a/doc/flags/14.0-to-15.0-transition/mysqlctl.diff b/doc/flags/14.0-to-15.0-transition/mysqlctl.diff
new file mode 100644
index 00000000000..285919a33f6
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/mysqlctl.diff
@@ -0,0 +1,241 @@
+diff --git a/flags/14.0/mysqlctl.txt b/flags/15.0/mysqlctl.txt
+index a535bc4..6444c8f 100644
+--- a/flags/14.0/mysqlctl.txt
++++ b/flags/15.0/mysqlctl.txt
+@@ -1,150 +1,86 @@
+-Usage of mysqlctl:
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
+- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+- --db_dba_password string db dba password
+- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
+- --db_flags uint Flag values as defined by MySQL.
+- --db_flavor string Flavor overrid. Valid value is FilePos.
+- --db_host string The host name for the tcp connection.
+- --db_port int tcp port
+- --db_server_name string server name of the DB we are connecting to.
+- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+- --db_ssl_ca string connection ssl ca
+- --db_ssl_ca_path string connection ssl ca path
+- --db_ssl_cert string connection ssl certificate
+- --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_port int mysql port (default 3306)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysql_socket string path to the mysql socket
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --port int vttablet port (default 6612)
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_uid uint tablet uid (default 41983)
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++Usage: mysqlctl [global-flags] -- [command-flags]
++
++The commands are listed below. Use 'mysqlctl -- {-h, --help}' for command help.
++
++ init [--wait_time=5m] [--init_db_sql_file=]
++ init_config
++ reinit_config
++ teardown [--wait_time=5m] [--force]
++ start [--wait_time=5m]
++ shutdown [--wait_time=5m]
++ position
++
++Global flags:
++ --alsologtostderr log to standard error as well as files
++ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
++ --app_pool_size int Size of the connection pool for app connections (default 40)
++ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
++ --db-credentials-file string db credentials file; send SIGHUP to reload this file
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
++ --db-credentials-vault-addr string URL to Vault server
++ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
++ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
++ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
++ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
++ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
++ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
++ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
++ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
++ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
++ --db_dba_password string db dba password
++ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
++ --db_dba_user string db dba user userKey (default "vt_dba")
++ --db_flags uint Flag values as defined by MySQL.
++ --db_flavor string Flavor overrid. Valid value is FilePos.
++ --db_host string The host name for the tcp connection.
++ --db_port int tcp port
++ --db_server_name string server name of the DB we are connecting to.
++ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
++ --db_ssl_ca string connection ssl ca
++ --db_ssl_ca_path string connection ssl ca path
++ --db_ssl_cert string connection ssl certificate
++ --db_ssl_key string connection ssl key
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
++ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
++ --dba_pool_size int Size of the connection pool for dba connections (default 20)
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --mysql_port int MySQL port (default 3306)
++ --mysql_server_version string MySQL server version to advertise.
++ --mysql_socket string Path to the mysqld socket file
++ --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
++ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
++ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
++ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
++ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
++ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
++ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
++ --port int port for the server
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
++ --socket_file string Local unix socket file to listen on
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
++ --tablet_uid uint Tablet UID (default 41983)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/doc/flags/14.0-to-15.0-transition/mysqlctld.diff b/doc/flags/14.0-to-15.0-transition/mysqlctld.diff
new file mode 100644
index 00000000000..593cc7476ee
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/mysqlctld.diff
@@ -0,0 +1,174 @@
+diff --git a/flags/14.0/mysqlctld.txt b/flags/15.0/mysqlctld.txt
+index 47df65e..f34697b 100644
+--- a/flags/14.0/mysqlctld.txt
++++ b/flags/15.0/mysqlctld.txt
+@@ -2,48 +2,24 @@ Usage of mysqlctld:
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
++ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor overrid. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+@@ -54,22 +30,19 @@ Usage of mysqlctld:
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -77,36 +50,25 @@ Usage of mysqlctld:
+ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --init_db_sql_file string path to .sql file to run after mysql_install_db
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
++ -h, --help display usage and exit
++ --init_db_sql_file string Path to .sql file to run after mysqld initialization
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_port int mysql port (default 3306)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
++ --mysql_port int MySQL port (default 3306)
+ --mysql_server_version string MySQL server version to advertise.
+- --mysql_socket string path to the mysql socket
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
++ --mysql_socket string Path to the mysqld socket file
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+@@ -114,40 +76,16 @@ Usage of mysqlctld:
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --socket_file string Local unix socket file to listen on
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_uid uint tablet uid (default 41983)
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --wait_time duration how long to wait for mysqld startup or shutdown (default 5m0s)
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --tablet_uid uint Tablet UID (default 41983)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --wait_time duration How long to wait for mysqld startup or shutdown (default 5m0s)
diff --git a/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff b/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff
new file mode 100644
index 00000000000..a5be9159aa0
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtaclcheck.diff
@@ -0,0 +1,91 @@
+diff --git a/flags/14.0/vtaclcheck.txt b/flags/15.0/vtaclcheck.txt
+index e7c9720..6e2c57d 100644
+--- a/flags/14.0/vtaclcheck.txt
++++ b/flags/15.0/vtaclcheck.txt
+.0/vtaclcheck.txt
+@@ -1,67 +1,19 @@
+ Usage of vtaclcheck:
+- --acl_file string The path of the JSON ACL file to check
+- --alsologtostderr log to standard error as well as files
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --static_auth_file string The path of the auth_server_static JSON file to check
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --acl-file string The path of the JSON ACL file to check
++ --alsologtostderr log to standard error as well as files
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --static-auth-file string The path of the auth_server_static JSON file to check
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/doc/flags/14.0-to-15.0-transition/vtadmin.diff b/doc/flags/14.0-to-15.0-transition/vtadmin.diff
new file mode 100644
index 00000000000..b1f229d200c
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtadmin.diff
@@ -0,0 +1,53 @@
+diff --git a/flags/14.0/vtadmin.txt b/flags/15.0/vtadmin.txt
+new file mode 100644
+index 0000000..7ea8436
+--- /dev/null
++++ b/flags/15.0/vtadmin.txt
+@@ -0,0 +1,47 @@
++Usage:
++ vtadmin [flags]
++
++Flags:
++ --addr string address to serve on (default ":15000")
++ --alsologtostderr log to standard error as well as files
++ --cache-refresh-key string instructs a request to ignore any cached data (if applicable) and refresh the cache;usable as an HTTP header named 'X-' and as a gRPC metadata key ''
++ Note: any whitespace characters are replaced with hyphens. (default "vt-cache-refresh")
++ --cluster cluster.ClustersFlag per-cluster configuration. any values here take precedence over those in -cluster-defaults or -cluster-config (default [])
++ --cluster-config cluster.FileConfig path to a yaml cluster configuration. see clusters.example.yaml (default {defaults: *cluster.Config:{ID: Name: DiscoveryImpl: DiscoveryFlagsByImpl:map[] TabletFQDNTmplStr: VtSQLFlags:map[] VtctldFlags:map[] BackupReadPoolConfig: SchemaReadPoolConfig: TopoRWPoolConfig: TopoReadPoolConfig: WorkflowReadPoolConfig: EmergencyFailoverPoolConfig: FailoverPoolConfig: SchemaCacheConfig: vtctldConfigOpts:[] vtsqlConfigOpts:[]}, clusters: []})
++ --cluster-defaults cluster.Config default options for all clusters (default *cluster.Config:{ID: Name: DiscoveryImpl: DiscoveryFlagsByImpl:map[] TabletFQDNTmplStr: VtSQLFlags:map[] VtctldFlags:map[] BackupReadPoolConfig: SchemaReadPoolConfig: TopoRWPoolConfig: TopoReadPoolConfig: WorkflowReadPoolConfig: EmergencyFailoverPoolConfig: FailoverPoolConfig: SchemaCacheConfig: vtctldConfigOpts:[] vtsqlConfigOpts:[]})
++ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
++ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --enable-dynamic-clusters whether to enable dynamic clusters that are set by request header cookies or gRPC metadata
++ --grpc-allow-reflection grpc_cli whether to register the gRPC server for reflection; this is required to use tools like grpc_cli
++ --grpc-enable-channelz whether to enable the channelz service on the gRPC server
++ --grpc-tracing whether to enable tracing on the gRPC server
++ -h, --help help for vtadmin
++ --http-debug-omit-env StringSetFlag name of an environment variable to omit from /debug/env, if http debug endpoints are enabled. specify multiple times to omit multiple env vars
++ --http-debug-sanitize-env StringSetFlag name of an environment variable to sanitize in /debug/env, if http debug endpoints are enabled. specify multiple times to sanitize multiple env vars
++ --http-metrics-endpoint string HTTP endpoint to expose prometheus metrics on. Omit to disable scraping metrics. Using a path used by VTAdmin's http API is unsupported and causes undefined behavior. (default "/metrics")
++ --http-no-compress whether to disable compression of HTTP API responses
++ --http-no-debug whether to disable /debug/pprof/* and /debug/env HTTP endpoints
++ --http-origin strings repeated, comma-separated flag of allowed CORS origins. omit to disable CORS
++ --http-tablet-url-tmpl string [EXPERIMENTAL] Go template string to generate a reachable http(s) address for a tablet. Currently used to make passthrough requests to /debug/vars endpoints. (default "https://{{ .Tablet.Hostname }}:80")
++ --http-tracing whether to enable tracing on the HTTP server
++ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
++ --lame-duck-duration duration length of lame duck period at shutdown (default 5s)
++ --lmux-read-timeout duration how long to spend connection muxing (default 1s)
++ --log_dir string If non-empty, write log files in this directory
++ --logtostderr log to standard error instead of files
++ --no-rbac whether to disable RBAC. must be set if not passing --no-rbac
++ --rbac whether to enable RBAC. must be set if not passing --rbac
++ --rbac-config string path to an RBAC config file. must be set if passing --rbac
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tracer string tracing service to use (default "noop")
++ --tracing-enable-logging whether to enable logging in the tracing service
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ -v, --v Level log level for V logs
++ --version version for vtadmin
diff --git a/doc/flags/14.0-to-15.0-transition/vtbackup.diff b/doc/flags/14.0-to-15.0-transition/vtbackup.diff
new file mode 100644
index 00000000000..475bae16b3c
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtbackup.diff
@@ -0,0 +1,497 @@
+diff --git a/flags/14.0/vtbackup.txt b/flags/15.0/vtbackup.txt
+index 15e5f21..7f81472 100644
+--- a/flags/14.0/vtbackup.txt
++++ b/flags/15.0/vtbackup.txt
+@@ -1,318 +1,175 @@
+ Usage of vtbackup:
+- --allow_first_backup Allow this job to take the first backup of an existing shard.
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)
+- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used
+- --azblob_backup_container_name string Azure Blob Container Name
+- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased) (default 1)
+- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/')
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage (default ceph_backup_config.json)
+- --concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-allprivs-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-allprivs-flags uint deprecated: use db_flags
+- --db-config-allprivs-flavor string deprecated: use db_flavor
+- --db-config-allprivs-host string deprecated: use db_host
+- --db-config-allprivs-pass string db allprivs deprecated: use db_allprivs_password
+- --db-config-allprivs-port int deprecated: use db_port
+- --db-config-allprivs-server_name string deprecated: use db_server_name
+- --db-config-allprivs-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-allprivs-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-allprivs-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-allprivs-ssl-key string deprecated: use db_ssl_key
+- --db-config-allprivs-uname string deprecated: use db_allprivs_user (default vt_allprivs)
+- --db-config-allprivs-unixsocket string deprecated: use db_socket
+- --db-config-app-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-app-flags uint deprecated: use db_flags
+- --db-config-app-flavor string deprecated: use db_flavor
+- --db-config-app-host string deprecated: use db_host
+- --db-config-app-pass string db app deprecated: use db_app_password
+- --db-config-app-port int deprecated: use db_port
+- --db-config-app-server_name string deprecated: use db_server_name
+- --db-config-app-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-app-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-app-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-app-ssl-key string deprecated: use db_ssl_key
+- --db-config-app-uname string deprecated: use db_app_user (default vt_app)
+- --db-config-app-unixsocket string deprecated: use db_socket
+- --db-config-appdebug-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-appdebug-flags uint deprecated: use db_flags
+- --db-config-appdebug-flavor string deprecated: use db_flavor
+- --db-config-appdebug-host string deprecated: use db_host
+- --db-config-appdebug-pass string db appdebug deprecated: use db_appdebug_password
+- --db-config-appdebug-port int deprecated: use db_port
+- --db-config-appdebug-server_name string deprecated: use db_server_name
+- --db-config-appdebug-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-appdebug-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-appdebug-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-appdebug-ssl-key string deprecated: use db_ssl_key
+- --db-config-appdebug-uname string deprecated: use db_appdebug_user (default vt_appdebug)
+- --db-config-appdebug-unixsocket string deprecated: use db_socket
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+- --db-config-erepl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-erepl-dbname string deprecated: dbname does not need to be explicitly configured
+- --db-config-erepl-flags uint deprecated: use db_flags
+- --db-config-erepl-flavor string deprecated: use db_flavor
+- --db-config-erepl-host string deprecated: use db_host
+- --db-config-erepl-pass string db erepl deprecated: use db_erepl_password
+- --db-config-erepl-port int deprecated: use db_port
+- --db-config-erepl-server_name string deprecated: use db_server_name
+- --db-config-erepl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-erepl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-erepl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-erepl-ssl-key string deprecated: use db_ssl_key
+- --db-config-erepl-uname string deprecated: use db_erepl_user (default vt_erepl)
+- --db-config-erepl-unixsocket string deprecated: use db_socket
+- --db-config-filtered-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-filtered-flags uint deprecated: use db_flags
+- --db-config-filtered-flavor string deprecated: use db_flavor
+- --db-config-filtered-host string deprecated: use db_host
+- --db-config-filtered-pass string db filtered deprecated: use db_filtered_password
+- --db-config-filtered-port int deprecated: use db_port
+- --db-config-filtered-server_name string deprecated: use db_server_name
+- --db-config-filtered-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-filtered-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-filtered-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-filtered-ssl-key string deprecated: use db_ssl_key
+- --db-config-filtered-uname string deprecated: use db_filtered_user (default vt_filtered)
+- --db-config-filtered-unixsocket string deprecated: use db_socket
+- --db-config-repl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-repl-flags uint deprecated: use db_flags
+- --db-config-repl-flavor string deprecated: use db_flavor
+- --db-config-repl-host string deprecated: use db_host
+- --db-config-repl-pass string db repl deprecated: use db_repl_password
+- --db-config-repl-port int deprecated: use db_port
+- --db-config-repl-server_name string deprecated: use db_server_name
+- --db-config-repl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-repl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-repl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-repl-ssl-key string deprecated: use db_ssl_key
+- --db-config-repl-uname string deprecated: use db_repl_user (default vt_repl)
+- --db-config-repl-unixsocket string deprecated: use db_socket
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --db_allprivs_password string db allprivs password
+- --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
+- --db_allprivs_user string db allprivs user userKey (default vt_allprivs)
+- --db_app_password string db app password
+- --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
+- --db_app_user string db app user userKey (default vt_app)
+- --db_appdebug_password string db appdebug password
+- --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
+- --db_appdebug_user string db appdebug user userKey (default vt_appdebug)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
+- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+- --db_dba_password string db dba password
+- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
+- --db_erepl_password string db erepl password
+- --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
+- --db_erepl_user string db erepl user userKey (default vt_erepl)
+- --db_filtered_password string db filtered password
+- --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
+- --db_filtered_user string db filtered user userKey (default vt_filtered)
+- --db_flags uint Flag values as defined by MySQL.
+- --db_flavor string Flavor overrid. Valid value is FilePos.
+- --db_host string The host name for the tcp connection.
+- --db_port int tcp port
+- --db_repl_password string db repl password
+- --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
+- --db_repl_user string db repl user userKey (default vt_repl)
+- --db_server_name string server name of the DB we are connecting to.
+- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+- --db_ssl_ca string connection ssl ca
+- --db_ssl_ca_path string connection ssl ca path
+- --db_ssl_cert string connection ssl certificate
+- --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --detach detached mode - run backups detached from the terminal
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --file_backup_storage_root string root directory for the file backup storage
+- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups
+- --gcs_backup_storage_root string root prefix for all backup-related object names
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --init_db_name_override string (init parameter) override the name of the db used by vttablet
+- --init_db_sql_file string path to .sql file to run after mysql_install_db
+- --init_keyspace string (init parameter) keyspace to use for this tablet
+- --init_shard string (init parameter) shard to use for this tablet
+- --initial_backup Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup.
+- --min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1)
+- --min_retention_time duration Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mycnf-file string path to my.cnf, if reading all config params from there
+- --mycnf_bin_log_path string mysql binlog path
+- --mycnf_data_dir string data directory for mysql
+- --mycnf_error_log_path string mysql error log path
+- --mycnf_general_log_path string mysql general log path
+- --mycnf_innodb_data_home_dir string Innodb data home directory
+- --mycnf_innodb_log_group_home_dir string Innodb log group home directory
+- --mycnf_master_info_file string mysql master.info file
+- --mycnf_mysql_port int port mysql is listening on
+- --mycnf_pid_file string mysql pid file
+- --mycnf_relay_log_index_path string mysql relay log index path
+- --mycnf_relay_log_info_path string mysql relay log info path
+- --mycnf_relay_log_path string mysql relay log path
+- --mycnf_secure_file_priv string mysql path for loading secure files
+- --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
+- --mycnf_slow_log_path string mysql slow query log path
+- --mycnf_socket_file string mysql socket file
+- --mycnf_tmp_dir string mysql tmp directory
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_port int mysql port (default 3306)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysql_socket string path to the mysql socket
+- --mysql_timeout duration how long to wait for mysqld startup (default 5m0s)
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --replication_timeout duration DEPRECATED AND UNUSED (default 1h0m0s)
+- --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
+- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided)
+- --s3_backup_aws_region string AWS region to use (default us-east-1)
+- --s3_backup_aws_retries int AWS request retries (default -1)
+- --s3_backup_force_path_style force the s3 path style
+- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors (default LogOff)
+- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file)
+- --s3_backup_storage_bucket string S3 bucket to use for backups
+- --s3_backup_storage_root string root prefix for all backup-related object names
+- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --timeout duration DEPRECATED AND UNUSED (default 2h0m0s)
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --allow_first_backup Allow this job to take the first backup of an existing shard.
++ --alsologtostderr log to standard error as well as files
++ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
++ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
++ --azblob_backup_container_name string Azure Blob Container Name.
++ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
++ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
++ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
++ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
++ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
++ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
++ --concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --db-credentials-file string db credentials file; send SIGHUP to reload this file
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
++ --db-credentials-vault-addr string URL to Vault server
++ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
++ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
++ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
++ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
++ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
++ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
++ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
++ --db_allprivs_password string db allprivs password
++ --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
++ --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
++ --db_app_password string db app password
++ --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
++ --db_app_user string db app user userKey (default "vt_app")
++ --db_appdebug_password string db appdebug password
++ --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
++ --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
++ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
++ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
++ --db_dba_password string db dba password
++ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
++ --db_dba_user string db dba user userKey (default "vt_dba")
++ --db_erepl_password string db erepl password
++ --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
++ --db_erepl_user string db erepl user userKey (default "vt_erepl")
++ --db_filtered_password string db filtered password
++ --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
++ --db_filtered_user string db filtered user userKey (default "vt_filtered")
++ --db_flags uint Flag values as defined by MySQL.
++ --db_flavor string Flavor override. Valid value is FilePos.
++ --db_host string The host name for the tcp connection.
++ --db_port int tcp port
++ --db_repl_password string db repl password
++ --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
++ --db_repl_user string db repl user userKey (default "vt_repl")
++ --db_server_name string server name of the DB we are connecting to.
++ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
++ --db_ssl_ca string connection ssl ca
++ --db_ssl_ca_path string connection ssl ca path
++ --db_ssl_cert string connection ssl certificate
++ --db_ssl_key string connection ssl key
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
++ --detach detached mode - run backups detached from the terminal
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --file_backup_storage_root string Root directory for the file backup storage.
++ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
++ --gcs_backup_storage_root string Root prefix for all backup-related object names.
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --init_db_name_override string (init parameter) override the name of the db used by vttablet
++ --init_db_sql_file string path to .sql file to run after mysql_install_db
++ --init_keyspace string (init parameter) keyspace to use for this tablet
++ --init_shard string (init parameter) shard to use for this tablet
++ --initial_backup Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).
++ --keep-alive-timeout duration Wait until timeout elapses after a successful backup before shutting down.
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup.
++ --min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1)
++ --min_retention_time duration Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.
++ --mycnf-file string path to my.cnf, if reading all config params from there
++ --mycnf_bin_log_path string mysql binlog path
++ --mycnf_data_dir string data directory for mysql
++ --mycnf_error_log_path string mysql error log path
++ --mycnf_general_log_path string mysql general log path
++ --mycnf_innodb_data_home_dir string Innodb data home directory
++ --mycnf_innodb_log_group_home_dir string Innodb log group home directory
++ --mycnf_master_info_file string mysql master.info file
++ --mycnf_mysql_port int port mysql is listening on
++ --mycnf_pid_file string mysql pid file
++ --mycnf_relay_log_index_path string mysql relay log index path
++ --mycnf_relay_log_info_path string mysql relay log info path
++ --mycnf_relay_log_path string mysql relay log path
++ --mycnf_secure_file_priv string mysql path for loading secure files
++ --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
++ --mycnf_slow_log_path string mysql slow query log path
++ --mycnf_socket_file string mysql socket file
++ --mycnf_tmp_dir string mysql tmp directory
++ --mysql_port int mysql port (default 3306)
++ --mysql_server_version string MySQL server version to advertise.
++ --mysql_socket string path to the mysql socket
++ --mysql_timeout duration how long to wait for mysqld startup (default 5m0s)
++ --port int port for the server
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
++ --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
++ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
++ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
++ --s3_backup_aws_retries int AWS request retries. (default -1)
++ --s3_backup_force_path_style force the s3 path style.
++ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
++ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
++ --s3_backup_storage_bucket string S3 bucket to use for backups.
++ --s3_backup_storage_root string root prefix for all backup-related object names.
++ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
++ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
++ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
++ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
++ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
++ --topo_global_root string the path of the global topology data in the global topology server
++ --topo_global_server_address string the address of the global topology server
++ --topo_implementation string the topology implementation to use
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be &lt;scheme&gt;:&lt;auth&gt;, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
++ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
++ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
++ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
++ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
++ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
++ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
++ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vtctlclient.diff b/doc/flags/14.0-to-15.0-transition/vtctlclient.diff
new file mode 100644
index 00000000000..b8a2978f7af
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtctlclient.diff
@@ -0,0 +1,88 @@
+diff --git a/flags/14.0/vtctlclient.txt b/flags/15.0/vtctlclient.txt
+index 2ee62d7..207f319 100644
+--- a/flags/14.0/vtctlclient.txt
++++ b/flags/15.0/vtctlclient.txt
+@@ -1,43 +1,41 @@
+ Usage of vtctlclient:
+- --action_timeout duration timeout for the total command (default 1h0m0s)
+- --alsologtostderr log to standard error as well as files
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --server string server to use for connection
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default grpc)
+- --vtctld_grpc_ca string the server ca to use to validate servers when connecting
+- --vtctld_grpc_cert string the cert to use to connect
+- --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtctld_grpc_key string the key to use to connect
+- --vtctld_grpc_server_name string the server name to use to validate server certificate
++ --action_timeout duration timeout for the total command (default 1h0m0s)
++ --alsologtostderr log to standard error as well as files
++ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
++ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --server string server to use for connection
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tracer string tracing service to use (default "noop")
++ --tracing-enable-logging whether to enable logging in the tracing service
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
++ --vtctld_grpc_ca string the server ca to use to validate servers when connecting
++ --vtctld_grpc_cert string the cert to use to connect
++ --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtctld_grpc_key string the key to use to connect
++ --vtctld_grpc_server_name string the server name to use to validate server certificate
diff --git a/doc/flags/14.0-to-15.0-transition/vtctld.diff b/doc/flags/14.0-to-15.0-transition/vtctld.diff
new file mode 100644
index 00000000000..61b8aedda54
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtctld.diff
@@ -0,0 +1,411 @@
+diff --git a/flags/14.0/vtctld.txt b/flags/15.0/vtctld.txt
+index a063b8c..887a4da 100644
+--- a/flags/14.0/vtctld.txt
++++ b/flags/15.0/vtctld.txt
+@@ -1,84 +1,45 @@
+ Usage of vtctld:
+ --action_timeout duration time to wait for an action before resorting to force (default 2m0s)
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
+ --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)
+- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used
+- --azblob_backup_container_name string Azure Blob Container Name
+- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased) (default 1)
+- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/')
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
++ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
++ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
++ --azblob_backup_container_name string Azure Blob Container Name.
++ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
++ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default grpc)
+- --binlog_use_v3_resharding_mode (DEPRECATED) True if and only if the binlog streamer should use V3-style sharding, which doesn't require a preset sharding key column. (default true)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
++ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
++ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
++ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --cell string cell to use
+- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage (default ceph_backup_config.json)
+- --client-found-rows-pool-size int DEPRECATED: queryserver-config-transaction-cap will be used instead.
++ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
++ --compression-engine-name string compressor engine used for compression. (default "pargzip")
++ --compression-level int what level to pass to the compressor. (default 1)
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
+- --durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default none)
++ --durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default "none")
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-autocommit This flag is deprecated. Autocommit is always allowed. (default true)
+- --enable-consolidator Synonym to -enable_consolidator (default true)
+- --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+- --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
+- --enable-tx-throttler Synonym to -enable_tx_throttler
+- --enable_consolidator This option enables the query consolidator. (default true)
+- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+- --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+- --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+- --enable_queries [DEPRECATED - query commands via vtctl are being deprecated] if set, allows vtgate and vttablet queries. May have security implications, as the queries will be run from this process.
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+- --enable_realtime_stats Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.
+- --enable_replication_reporter Use polling to track replication lag.
+- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+- --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+- --enable_vtctld_ui If true, the vtctld web interface will be enabled. Default is true. (default true)
+- --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+- --file_backup_storage_root string root directory for the file backup storage
+- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups
+- --gcs_backup_storage_root string root prefix for all backup-related object names
++ --external-compressor string command with arguments to use when compressing a backup.
++ --external-compressor-extension string extension to use when using an external compressor.
++ --external-decompressor string command with arguments to use when decompressing a backup.
++ --file_backup_storage_root string Root directory for the file backup storage.
++ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
++ --gcs_backup_storage_root string Root prefix for all backup-related object names.
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
++ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -87,132 +48,57 @@ Usage of vtctld:
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --health_check_interval duration Interval between health checks (default 20s)
+- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+- --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
+- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+- --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
++ -h, --help display usage and exit
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --online_ddl_check_interval duration deprecated. Will be removed in next Vitess version
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --opentsdb_uri string URI of opentsdb /api/put method
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pool-name-prefix string Deprecated
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --query-log-stream-handler string URL handler for streaming queries log (default /debug/querylog)
+- --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
+- --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-allowunsafe-dmls deprecated
+- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+- --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-dml-rows int query server max dml rows per statement, maximum number of rows allowed to return at a time for an update or delete with either 1) an equality where clauses on primary keys, or 2) a subselect statement. For update and delete statements in above two categories, vttablet will split the original query into multiple small queries based on this configuration value.
+- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-prefill-parallelism int DEPRECATED: Unused.
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+- --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
+- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-prefill-parallelism int query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+- --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
+- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+- --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+- --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
+- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+- --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-prefill-parallelism int query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
+- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+- --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+- --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+- --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+- --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+- --queryserver_enable_online_ddl Enable online DDL. (default true)
+- --redact-debug-ui-queries redact full queries and bind variables from debug UI
+- --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided)
+- --s3_backup_aws_region string AWS region to use (default us-east-1)
+- --s3_backup_aws_retries int AWS request retries (default -1)
+- --s3_backup_force_path_style force the s3 path style
+- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors (default LogOff)
+- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file)
+- --s3_backup_storage_bucket string S3 bucket to use for backups
+- --s3_backup_storage_root string root prefix for all backup-related object names
+- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections
+- --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+- --schema_change_check_interval int this value decides how often we check schema change dir, in seconds (default 60)
+- --schema_change_controller string schema change controller is responsible for finding schema changes and responding to schema change events
+- --schema_change_dir string directory contains schema changes for all keyspaces. Each keyspace has its own directory and schema changes are expected to live in '$KEYSPACE/input' dir. e.g. test_keyspace/input/*sql, each sql file represents a schema change
+- --schema_change_replicas_timeout duration how long to wait for replicas to receive the schema change (default 10s)
+- --schema_change_user string The user who submits this schema change.
++ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
++ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
++ --s3_backup_aws_retries int AWS request retries. (default -1)
++ --s3_backup_force_path_style force the s3 path style.
++ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
++ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
++ --s3_backup_storage_bucket string S3 bucket to use for backups.
++ --s3_backup_storage_root string root prefix for all backup-related object names.
++ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
++ --schema_change_check_interval duration How often the schema change dir is checked for schema changes (deprecated: if passed as a bare integer, the duration will be in seconds). (default 1m0s)
++ --schema_change_controller string Schema change controller is responsible for finding schema changes and responding to schema change events.
++ --schema_change_dir string Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.
++ --schema_change_replicas_timeout duration How long to wait for replicas to receive a schema change. (default 10s)
++ --schema_change_user string The user who schema changes are submitted on behalf of.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+- --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+@@ -226,24 +112,13 @@ Usage of vtctld:
+ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_manager_grpc_key string the key to use to connect
+ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
+- --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+- --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default replica)
+- --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+- --throttler_client_grpc_ca string the server ca to use to validate servers when connecting
+- --throttler_client_grpc_cert string the cert to use to connect
+- --throttler_client_grpc_crl string the server crl to use to validate server certificates when connecting
+- --throttler_client_grpc_key string the key to use to connect
+- --throttler_client_grpc_server_name string the server name to use to validate server certificate
+- --throttler_client_protocol string the protocol to use to talk to the integrated throttler service (default grpc)
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
++ --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
++ --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
++ --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+@@ -256,106 +131,22 @@ Usage of vtctld:
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_read_concurrency int concurrent topo reads (default 32)
++ --topo_read_concurrency int Concurrency of topo reads. (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
++ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction-log-stream-handler string URL handler for streaming transactions log (default /debug/txlog)
+- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --transaction_shutdown_grace_period float DEPRECATED: use shutdown_grace_period instead.
+- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+- --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-config string Synonym to -tx_throttler_config (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx-throttler-healthcheck-cells value Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx_throttler_healthcheck_cells value A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+- --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+- --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+- --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
+- --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+- --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence (default 15m0s)
+- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+- --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vreplication_tablet_type string comma separated list of tablet types used as a source (default in_order:REPLICA,PRIMARY)
+- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+- --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default grpc)
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vtctl_healthcheck_retry_delay duration delay before retrying a failed healthcheck (default 5s)
+ --vtctl_healthcheck_timeout duration the health check timeout period (default 1m0s)
+ --vtctl_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+ --vtctld_sanitize_log_messages When true, vtctld sanitizes logging.
+ --vtctld_show_topology_crud Controls the display of the CRUD topology actions in the vtctld UI. (default true)
+- --vtgate_grpc_ca string the server ca to use to validate servers when connecting
+- --vtgate_grpc_cert string the cert to use to connect
+- --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtgate_grpc_key string the key to use to connect
+- --vtgate_grpc_server_name string the server name to use to validate server certificate
+- --vtgate_protocol string how to talk to vtgate (default grpc)
+- --vtworker_client_grpc_ca string (DEPRECATED) the server ca to use to validate servers when connecting
+- --vtworker_client_grpc_cert string (DEPRECATED) the cert to use to connect
+- --vtworker_client_grpc_crl string (DEPRECATED) the server crl to use to validate server certificates when connecting
+- --vtworker_client_grpc_key string (DEPRECATED) the key to use to connect
+- --vtworker_client_grpc_server_name string (DEPRECATED) the server name to use to validate server certificate
+- --vtworker_client_protocol string (DEPRECATED) the protocol to use to talk to the vtworker server (default grpc)
+- --wait_for_drain_sleep_rdonly duration (DEPRECATED) time to wait before shutting the query service on old RDONLY tablets during MigrateServedTypes (default 5s)
+- --wait_for_drain_sleep_replica duration (DEPRECATED) time to wait before shutting the query service on old REPLICA tablets during MigrateServedTypes (default 15s)
+- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --web_dir string NOT USED, here for backward compatibility
+- --web_dir2 string NOT USED, here for backward compatibility
+- --workflow_manager_disable value comma separated list of workflow types to disable
+- --workflow_manager_init Initialize the workflow manager in this vtctld instance.
+- --workflow_manager_use_election if specified, will use a topology server-based master election to ensure only one workflow manager is active at a time.
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vtctldclient.diff b/doc/flags/14.0-to-15.0-transition/vtctldclient.diff
new file mode 100644
index 00000000000..40874d41ac7
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtctldclient.diff
@@ -0,0 +1,216 @@
+diff --git a/flags/14.0/vtctldclient.txt b/flags/15.0/vtctldclient.txt
+index ddff2f5..35c7092 100644
+--- a/flags/14.0/vtctldclient.txt
++++ b/flags/15.0/vtctldclient.txt
+@@ -8,6 +8,7 @@ Available Commands:
+ AddCellsAlias Defines a group of cells that can be referenced by a single name (the alias).
+ ApplyRoutingRules Applies the VSchema routing rules.
+ ApplySchema Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.
++ ApplyShardRoutingRules Applies VSchema shard routing rules.
+ ApplyVSchema Applies the VTGate routing schema to the provided keyspace. Shows the result after application.
+ Backup Uses the BackupStorage service on the given tablet to create and store a new backup.
+ BackupShard Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.
+@@ -29,12 +30,14 @@ Available Commands:
+ GetCellInfo Gets the CellInfo object for the given cell.
+ GetCellInfoNames Lists the names of all cells in the cluster.
+ GetCellsAliases Gets all CellsAlias objects in the cluster.
++ GetFullStatus Outputs a JSON structure that contains full status of MySQL including the replication information, semi-sync information, GTID information among others.
+ GetKeyspace Returns information about the given keyspace from the topology.
+ GetKeyspaces Returns information about every keyspace in the topology.
+ GetPermissions Displays the permissions for a tablet.
+ GetRoutingRules Displays the VSchema routing rules.
+ GetSchema Displays the full schema for a tablet, optionally restricted to the specified tables/views.
+ GetShard Returns information about a shard in the topology.
++ GetShardRoutingRules Displays VSchema shard routing rules.
+ GetSrvKeyspaceNames Outputs a JSON mapping of cell=>keyspace names served in that cell. Omit to query all cells.
+ GetSrvKeyspaces Returns the SrvKeyspaces for the given keyspace in one or more cells.
+ GetSrvVSchema Returns the SrvVSchema for the given cell.
+@@ -42,9 +45,9 @@ Available Commands:
+ GetTablet Outputs a JSON structure that contains information about the tablet.
+ GetTabletVersion Print the version of a tablet from its debug vars.
+ GetTablets Looks up tablets according to filter criteria.
++ GetTopologyPath Gets the file located at the specified path in the topology server.
+ GetVSchema Prints a JSON representation of a keyspace's topo record.
+ GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.
+- InitShardPrimary Sets the initial primary for the shard.
+ LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort.
+ PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations.
+ PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running.
+@@ -63,7 +66,7 @@ Available Commands:
+ RunHealthCheck Runs a healthcheck on the remote tablet.
+ SetKeyspaceDurabilityPolicy Sets the durability-policy used by the specified keyspace.
+ SetShardIsPrimaryServing Add or remove a shard from serving. This is meant as an emergency function. It does not rebuild any serving graphs; i.e. it does not run `RebuildKeyspaceGraph`.
+- SetShardTabletControl Sets the TabletControl record for a shard and tablet type. Only use this for an emergency fix or after a finished MoveTables. The MigrateServedFrom and MigrateServedType commands set this record appropriately already.
++ SetShardTabletControl Sets the TabletControl record for a shard and tablet type. Only use this for an emergency fix or after a finished MoveTables.
+ SetWritable Sets the specified tablet as writable or read-only.
+ ShardReplicationFix Walks through a ShardReplication object and fixes the first error encountered.
+ ShardReplicationPositions
+@@ -80,134 +83,42 @@ Available Commands:
+ ValidateSchemaKeyspace Validates that the schema on the primary tablet for shard 0 matches the schema on all other tablets in the keyspace.
+ ValidateShard Validates that all nodes reachable from the specified shard are consistent.
+ ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace.
++ ValidateVersionShard Validates that the version on the primary matches all of the replicas.
++ completion Generate the autocompletion script for the specified shell
+ help Help about any command
+
+ Flags:
+- --allowed_tablet_types TabletTypeList Specifies the tablet types this vtgate is allowed to route queries to
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-consolidator Synonym to -enable_consolidator (default true)
+- --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-tx-throttler Synonym to -enable_tx_throttler
+- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests (default 0s)
+- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) (default 0s)
+- --keyspaces_to_watch StringList Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+- --log_dir string If non-empty, write log files in this directory
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials (default 0s)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pool-name-prefix string Deprecated
+- --pprof string enable profiling
+- --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
+- --querylog-format string format for query logs ("text" or "json") (default "text")
+- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+- --redact-debug-ui-queries redact full queries and bind variables from debug UI
+- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map StringList comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512)
+- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default "grpc")
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default "http://{{.GetTabletHostPort}}")
+- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica")
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_implementation string the topology implementation to use
+- --tracer string tracing service to use (default "noop")
+- --tracing-sampling-rate OptionalFloat64 sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-healthcheck-cells StringList Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_healthcheck_cells StringList A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+- -v, --v Level log level for V logs
+- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+- --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream is updated when idling (default 1)
+- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default "grpc")
+- --vtctld_grpc_cert string the cert to use to connect
+- --vtctld_grpc_key string the key to use to connect
+- --wait_for_drain_sleep_rdonly duration (DEPRECATED) time to wait before shutting the query service on old RDONLY tablets during MigrateServedTypes (default 5s)
+- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --action_timeout duration timeout for the total command (default 1h0m0s)
++ --alsologtostderr log to standard error as well as files
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help help for vtctldclient
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --mysql_server_version string MySQL server version to advertise.
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --server string server to use for connection (required)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ -v, --v Level log level for V logs
++ --version version for vtctldclient
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
++ --vtctld_grpc_ca string the server ca to use to validate servers when connecting
++ --vtctld_grpc_cert string the cert to use to connect
++ --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtctld_grpc_key string the key to use to connect
++ --vtctld_grpc_server_name string the server name to use to validate server certificate
+
+ Use "vtctldclient [command] --help" for more information about a command.
diff --git a/doc/flags/14.0-to-15.0-transition/vtexplain.diff b/doc/flags/14.0-to-15.0-transition/vtexplain.diff
new file mode 100644
index 00000000000..0c63b374ecd
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtexplain.diff
@@ -0,0 +1,417 @@
+diff --git a/flags/14.0/vtexplain.txt b/flags/15.0/vtexplain.txt
+index 00a605e..2666e0b 100644
+--- a/flags/14.0/vtexplain.txt
++++ b/flags/15.0/vtexplain.txt
+@@ -1,353 +1,60 @@
+ Usage of vtexplain:
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --batch-interval duration Interval between logical time slots. (default 10ms)
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default grpc)
+- --binlog_use_v3_resharding_mode (DEPRECATED) True if and only if the binlog streamer should use V3-style sharding, which doesn't require a preset sharding key column. (default true)
+- --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
+- --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default keyspace_events)
+- --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
+- --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s)
+- --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s)
+- --buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
+- --buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cells_to_watch string comma-separated list of cells for watching tablets
+- --client-found-rows-pool-size int DEPRECATED: queryserver-config-transaction-cap will be used instead.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default fail)
+- --dbname string Optional database target to override normal routing
+- --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default direct)
+- --default_tablet_type value The default tablet type to set for queries, when one is not explicitly selected (default PRIMARY)
+- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --disable_local_gateway deprecated: if specified, this process will not route any queries to local tablets in the local cell
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-autocommit This flag is deprecated. Autocommit is always allowed. (default true)
+- --enable-consolidator Synonym to -enable_consolidator (default true)
+- --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+- --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
+- --enable-tx-throttler Synonym to -enable_tx_throttler
+- --enable_buffer Enable buffering (stalling) of primary traffic during failovers.
+- --enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
+- --enable_consolidator This option enables the query consolidator. (default true)
+- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+- --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+- --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+- --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+- --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+- --enable_replication_reporter Use polling to track replication lag.
+- --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true)
+- --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+- --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+- --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+- --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default multi)
+- --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default allow)
+- --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+- --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
+- --gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
+- --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
+- --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
+- --gh-ost-path string override default gh-ost binary full path
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combined with the server cert to return the full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --health_check_interval duration Interval between health checks (default 20s)
+- --healthcheck_retry_delay duration health check retry delay (default 2ms)
+- --healthcheck_timeout duration the health check timeout period (default 1m0s)
+- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+- --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
+- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+- --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+- --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
+- --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+- --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_queries_to_file string Enable query logging to the specified file
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
+- --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
+- --migration_check_interval duration Interval between migration checks (default 1m0s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
+- --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default static)
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default OLTP)
+- --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
+- --mysql_server_query_timeout duration mysql query timeout
+- --mysql_server_read_timeout duration connection read timeout
+- --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
+- --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
+- --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
+- --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL
+- --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL
+- --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
+- --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combined with the server cert to return the full certificate chain to clients
+- --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+- --mysql_server_version string MySQL server version to advertise.
+- --mysql_server_write_timeout duration connection write timeout
+- --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish
+- --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default tcp)
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
+- --normalize Whether to enable vtgate normalization
+- --normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --output-mode string Output in human-friendly text or json (default text)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4
+- --planner_version string Deprecated flag. Use planner-version instead
+- --pool-name-prefix string Deprecated
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --pprof string enable profiling
+- --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
+- --pt-osc-path string override default pt-online-schema-change binary full path
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --query-log-stream-handler string URL handler for streaming queries log (default /debug/querylog)
+- --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
+- --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-allowunsafe-dmls deprecated
+- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+- --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-dml-rows int query server max dml rows per statement, maximum number of rows allowed to return at a time for an update or delete with either 1) an equality where clauses on primary keys, or 2) a subselect statement. For update and delete statements in above two categories, vttablet will split the original query into multiple small queries based on this configuration value.
+- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-prefill-parallelism int DEPRECATED: Unused.
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+- --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
+- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-prefill-parallelism int query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+- --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+- --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
+- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+- --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+- --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
+- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+- --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-prefill-parallelism int query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
+- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+- --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+- --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+- --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+- --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+- --queryserver_enable_online_ddl Enable online DDL. (default true)
+- --redact-debug-ui-queries redact full queries and bind variables from debug UI
+- --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default ROW)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
+- --retry-count int retry count (default 2)
+- --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+- --schema string The SQL table schema
+- --schema-file string Identifies the file that contains the SQL table schema
+- --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
+- --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+- --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2)
+- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+- --sql string A list of semicolon-delimited SQL commands to analyze
+- --sql-file string Identifies the file that contains the SQL commands to analyze
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+- --srv_topo_timeout duration topo server timeout (default 5s)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768)
+- --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
+- --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default hold,purge,evac,drop)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
+- --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+- --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default replica)
+- --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_read_concurrency int concurrent topo reads (default 32)
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction-log-stream-handler string URL handler for streaming transactions log (default /debug/txlog)
+- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+- --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default MULTI)
+- --transaction_shutdown_grace_period float DEPRECATED: use shutdown_grace_period instead.
+- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+- --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-config string Synonym to -tx_throttler_config (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx-throttler-healthcheck-cells value Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx_throttler_healthcheck_cells value A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+- --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+- --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+- --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
+- --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+- --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence (default 15m0s)
+- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+- --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vreplication_tablet_type string comma separated list of tablet types used as a source (default in_order:REPLICA,PRIMARY)
+- --vschema string Identifies the VTGate routing schema
+- --vschema-file string Identifies the VTGate routing schema file
+- --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
+- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+- --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+- --vtgate-config-terse-errors prevent bind vars from escaping in returned errors
+- --vtgate_protocol string how to talk to vtgate (default grpc)
+- --warn_memory_rows int Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented. (default 30000)
+- --warn_payload_size int The warning threshold for query payloads in bytes. A payload greater than this threshold will cause the VtGateWarnings.WarnPayloadSizeExceeded counter to be incremented.
+- --warn_sharded_only If any features that are only available in unsharded mode are used, query execution warnings will be added to the session
+- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --alsologtostderr log to standard error as well as files
++ --batch-interval duration Interval between logical time slots. (default 10ms)
++ --dbname string Optional database target to override normal routing
++ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
++ --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi")
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
++ --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_queries_to_file string Enable query logging to the specified file
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
++ --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
++ --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
++ --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static")
++ --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP")
++ --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
++ --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
++ --mysql_server_query_timeout duration mysql query timeout (default 0s)
++ --mysql_server_read_timeout duration connection read timeout (default 0s)
++ --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
++ --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
++ --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
++ --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL
++ --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL
++ --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
++ --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
++ --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
++ --mysql_server_version string MySQL server version to advertise.
++ --mysql_server_write_timeout duration connection write timeout (default 0s)
++ --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
++ --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
++ --normalize Whether to enable vtgate normalization
++ --output-mode string Output in human-friendly text or json (default "text")
++ --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4
++ --pprof strings enable profiling
++ --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
++ --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW")
++ --schema string The SQL table schema
++ --schema-file string Identifies the file that contains the SQL table schema
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2)
++ --sql string A list of semicolon-delimited SQL commands to analyze
++ --sql-file string Identifies the file that contains the SQL commands to analyze
++ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
++ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vschema string Identifies the VTGate routing schema
++ --vschema-file string Identifies the VTGate routing schema file
diff --git a/doc/flags/14.0-to-15.0-transition/vtgate.diff b/doc/flags/14.0-to-15.0-transition/vtgate.diff
new file mode 100644
index 00000000000..6e055031636
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtgate.diff
@@ -0,0 +1,247 @@
+diff --git a/flags/14.0/vtgate.txt b/flags/15.0/vtgate.txt
+index 49c7f59..e9e8591 100644
+--- a/flags/14.0/vtgate.txt
++++ b/flags/15.0/vtgate.txt
+@@ -1,48 +1,48 @@
+ Usage of vtgate:
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
++ --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.
+ --alsologtostderr log to standard error as well as files
+ --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
+- --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default keyspace_events)
++ --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events")
+ --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
+ --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s)
+ --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s)
+ --buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
+ --buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cell string cell to use (default test_nj)
++ --cell string cell to use
+ --cells_to_watch string comma-separated list of cells for watching tablets
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default fail)
+- --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default direct)
+- --default_tablet_type value The default tablet type to set for queries, when one is not explicitly selected (default PRIMARY)
+- --disable_local_gateway deprecated: if specified, this process will not route any queries to local tablets in the local cell
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
++ --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default "fail")
++ --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default "direct")
++ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
++ --discovery_high_replication_lag_minimum_serving duration Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag. (default 2h0m0s)
++ --discovery_low_replication_lag duration Threshold below which replication lag is considered low enough to be healthy. (default 30s)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --enable-partial-keyspace-migration (Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false)
+ --enable_buffer Enable buffering (stalling) of primary traffic during failovers.
+ --enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
+ --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+ --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+ --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true)
+ --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+- --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default allow)
++ --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
+ --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+ --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+ --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
+ --gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
++ --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
++ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -51,8 +51,8 @@ Usage of vtgate:
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+@@ -61,14 +61,15 @@ Usage of vtgate:
+ --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.
+ --healthcheck_retry_delay duration health check retry delay (default 2ms)
+ --healthcheck_timeout duration the health check timeout period (default 1m0s)
++ -h, --help display usage and exit
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
++ --keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
++ --legacy_replication_lag_algorithm Use the legacy algorithm when selecting vttablets for serving. (default true)
+ --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_queries_to_file string Enable query logging to the specified file
+@@ -76,34 +77,33 @@ Usage of vtgate:
+ --logtostderr log to standard error instead of files
+ --max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
+ --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+ --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
++ --min_number_serving_vttablets int The minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving. (default 2)
++ --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
+ --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
+- --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default static)
++ --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static")
+ --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+ --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+ --mysql_auth_static_reload_interval duration Ticker to reload credentials
+ --mysql_auth_vault_addr string URL to Vault server
+ --mysql_auth_vault_path string Vault path to vtgate credentials JSON blob, e.g.: secret/data/prod/vtgatecreds
+- --mysql_auth_vault_role_mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
++ --mysql_auth_vault_role_mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --mysql_auth_vault_role_secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --mysql_auth_vault_roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --mysql_auth_vault_timeout duration Timeout for vault API operations (default 10s)
+ --mysql_auth_vault_tls_ca string Path to CA PEM for validating Vault server certificate
+ --mysql_auth_vault_tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --mysql_auth_vault_ttl duration How long to cache vtgate credentials from the Vault server (default 30m0s)
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default OLTP)
++ --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
++ --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP")
+ --mysql_ldap_auth_config_file string JSON File from which to read LDAP server config.
+ --mysql_ldap_auth_config_string string JSON representation of LDAP server config.
+- --mysql_ldap_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
++ --mysql_ldap_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
+ --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
+ --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+ --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
+- --mysql_server_query_timeout duration mysql query timeout
+- --mysql_server_read_timeout duration connection read timeout
++ --mysql_server_query_timeout duration mysql query timeout (default 0s)
++ --mysql_server_read_timeout duration connection read timeout (default 0s)
+ --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
+ --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
+ --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
+@@ -113,9 +113,9 @@ Usage of vtgate:
+ --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --mysql_server_version string MySQL server version to advertise.
+- --mysql_server_write_timeout duration connection write timeout
+- --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish
+- --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default tcp)
++ --mysql_server_write_timeout duration connection write timeout (default 0s)
++ --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
++ --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
+ --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
+ --normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+@@ -123,13 +123,13 @@ Usage of vtgate:
+ --opentsdb_uri string URI of opentsdb /api/put method
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.
+- --planner_version string Deprecated flag. Use planner-version instead
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
+ --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
++ --querylog-format string format for query logs ("text" or "json") (default "text")
+ --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --redact-debug-ui-queries redact full queries and bind variables from debug UI
+ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+@@ -137,7 +137,7 @@ Usage of vtgate:
+ --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
+ --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+@@ -145,27 +145,26 @@ Usage of vtgate:
+ --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --statsd_address string Address for statsd client
+- --statsd_sample_rate float (default 1)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --statsd_sample_rate float Sample rate for statsd metrics (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768)
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
++ --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_grpc_key string the key to use to connect
+ --tablet_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_types_to_wait string wait till connected for specified tablet types during Gateway initialization
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
++ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
++ --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
++ --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
++ --tablet_types_to_wait strings Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.
++ --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+@@ -178,21 +177,21 @@ Usage of vtgate:
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_read_concurrency int concurrent topo reads (default 32)
++ --topo_read_concurrency int Concurrency of topo reads. (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
++ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default MULTI)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
++ --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
+ --vtctld_addr string address of a vtctld instance
+ --vtgate-config-terse-errors prevent bind vars from escaping in returned errors
diff --git a/doc/flags/14.0-to-15.0-transition/vtgr.diff b/doc/flags/14.0-to-15.0-transition/vtgr.diff
new file mode 100644
index 00000000000..226fd80b7b0
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtgr.diff
@@ -0,0 +1,187 @@
+diff --git a/flags/14.0/vtgr.txt b/flags/15.0/vtgr.txt
+index a4c928e..75e7b0a 100644
+--- a/flags/14.0/vtgr.txt
++++ b/flags/15.0/vtgr.txt
+@@ -1,111 +1,72 @@
+ Usage of vtgr:
+- --abort_rebootstrap don't allow vtgr to rebootstrap an existing group
+- --alsologtostderr log to standard error as well as files
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --clusters_to_watch string Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db_config string full path to db config file that will be used by VTGR
+- --db_flavor string mysql flavor override (default MySQL56)
+- --db_port int local mysql port, set this to enable local fast check
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable_heartbeat_check enable heartbeat checking, set together with -group_heartbeat_threshold
+- --gr_port int port to bootstrap a mysql group (default 33061)
+- --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with -enable_heartbeat_check
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s)
+- --pprof string enable profiling
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --refresh_interval duration refresh interval to load tablets (default 10s)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --scan_interval duration scan interval to diagnose and repair (default 3s)
+- --scan_repair_timeout duration time to wait for a Diagnose and repair operation (default 3s)
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default noop)
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vtgr_config string config file for vtgr
++ --abort_rebootstrap Don't allow vtgr to rebootstrap an existing group.
++ --alsologtostderr log to standard error as well as files
++ --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --db_config string Full path to db config file that will be used by VTGR.
++ --db_flavor string MySQL flavor override. (default "MySQL56")
++ --db_port int Local mysql port, set this to enable local fast check.
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold.
++ --gr_port int Port to bootstrap a MySQL group. (default 33061)
++ --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s)
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --refresh_interval duration Refresh interval to load tablets. (default 10s)
++ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
++ --scan_interval duration Scan interval to diagnose and repair. (default 3s)
++ --scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
++ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
++ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
++ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
++ --topo_global_root string the path of the global topology data in the global topology server
++ --topo_global_server_address string the address of the global topology server
++ --topo_implementation string the topology implementation to use
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vtgr_config string Config file for vtgr.
diff --git a/doc/flags/14.0-to-15.0-transition/vtorc.diff b/doc/flags/14.0-to-15.0-transition/vtorc.diff
new file mode 100644
index 00000000000..3bf06c2a6a4
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vtorc.diff
@@ -0,0 +1,212 @@
+diff --git a/flags/14.0/vtorc.txt b/flags/15.0/vtorc.txt
+index 732595e..74ab84c 100644
+--- a/flags/14.0/vtorc.txt
++++ b/flags/15.0/vtorc.txt
+@@ -1,123 +1,85 @@
+ Usage of vtorc:
+- --alsologtostderr log to standard error as well as files
+- --binlog string Binary log file name
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --clusters_to_watch string Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
+- --config string config file name
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --d string destination instance, host_fqdn[:port] (synonym to -s)
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --debug debug mode (very verbose)
+- --discovery auto discovery mode (default true)
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-database-update Enable database update, overrides SkipOrchestratorDatabaseUpdate
+- --grab-election Grab leadership (only applies to continuous mode)
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --ignore-raft-setup Override RaftEnabled for CLI invocation (CLI by default not allowed for raft setups). NOTE: operations by CLI invocation may not reflect in all raft nodes.
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --noop Dry run; do not perform destructing operations
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --orc_web_dir string Orchestrator http file location (default "web/orchestrator")
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --pprof string enable profiling
+- --promotion-rule string Promotion rule for register-andidate (prefer|neutral|prefer_not|must_not) (default "prefer")
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --quiet quiet
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --s string sibling instance, host_fqdn[:port]
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --shutdown_wait_time duration maximum time to wait for vtorc to release all the locks that it is holding before shutting down on SIGTERM (default 30s)
+- --skip-continuous-registration Skip cli commands performaing continuous registration (to reduce orchestratrator backend db load
+- --skip-unresolve Do not unresolve a host name
+- --skip-unresolve-check Skip/ignore checking an unresolve mapping (via hostname_unresolve table) resolves back to same hostname
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stack add stack trace upon error
+- --statement string Statement/hint
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default "grpc")
+- --tag string tag to add ('tagname' or 'tagname=tagvalue') or to search ('tagname' or 'tagname=tagvalue' or comma separated 'tag0,tag1=val1,tag2' for intersection of all)
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default "noop")
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --v value log level for V logs
+- --verbose verbose
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --alsologtostderr log to standard error as well as files
++ --audit-file-location string File location where the audit logs are to be stored
++ --audit-purge-duration duration Duration for which audit logs are held before being purged. Should be in multiples of days (default 168h0m0s)
++ --audit-to-backend Whether to store the audit log in the VTOrc database
++ --audit-to-syslog Whether to store the audit log in the syslog
++ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
++ --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
++ --config string config file name
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --emit_stats If set, emit stats to push-based monitoring and stats backends
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ -h, --help display usage and exit
++ --instance-poll-time duration Timer duration on which VTOrc refreshes MySQL information (default 5s)
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
++ --lock-shard-timeout duration Duration for which a shard lock is held when running a recovery (default 30s)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
++ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
++ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
++ --port int port for the server
++ --pprof strings enable profiling
++ --prevent-cross-cell-failover Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s)
++ --recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s)
++ --recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s)
++ --remote_operation_timeout duration time to wait for a remote operation (default 30s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --shutdown_wait_time duration Maximum time to wait for VTOrc to release all the locks that it is holding before shutting down on SIGTERM (default 30s)
++ --snapshot-topology-interval duration Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours
++ --sqlite-data-file string SQLite Datafile to use as VTOrc's database (default "file::memory:?mode=memory&cache=shared")
++ --stats_backend string The name of the registered push-based monitoring/stats backend to use
++ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_drop_variables string Variables to be dropped from the list of exported variables.
++ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s)
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
++ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
++ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
++ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
++ --topo_global_root string the path of the global topology data in the global topology server
++ --topo_global_server_address string the address of the global topology server
++ --topo_implementation string the topology implementation to use
++ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
++ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
++ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s)
diff --git a/doc/flags/14.0-to-15.0-transition/vttablet.diff b/doc/flags/14.0-to-15.0-transition/vttablet.diff
new file mode 100644
index 00000000000..a42e8f6b932
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vttablet.diff
@@ -0,0 +1,577 @@
+diff --git a/flags/14.0/vttablet.txt b/flags/15.0/vttablet.txt
+index 96a4298..25807a7 100644
+--- a/flags/14.0/vttablet.txt
++++ b/flags/15.0/vttablet.txt
+@@ -1,19 +1,17 @@
+ Usage of vttablet:
+- --allowed_tablet_types value Specifies the tablet types this vtgate is allowed to route queries to
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path)
+- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used
+- --azblob_backup_container_name string Azure Blob Container Name
+- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased) (default 1)
+- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/')
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default builtin)
++ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
++ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
++ --azblob_backup_container_name string Azure Blob Container Name.
++ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
++ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
++ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --binlog_host string PITR restore parameter: hostname/IP of binlog server.
+ --binlog_password string PITR restore parameter: password of binlog server.
+ --binlog_player_grpc_ca string the server ca to use to validate servers when connecting
+@@ -21,120 +19,29 @@ Usage of vttablet:
+ --binlog_player_grpc_crl string the server crl to use to validate server certificates when connecting
+ --binlog_player_grpc_key string the key to use to connect
+ --binlog_player_grpc_server_name string the server name to use to validate server certificate
+- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default grpc)
++ --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
+ --binlog_port int PITR restore parameter: port of binlog server.
+ --binlog_ssl_ca string PITR restore parameter: Filename containing TLS CA certificate to verify binlog server TLS certificate against.
+ --binlog_ssl_cert string PITR restore parameter: Filename containing mTLS client certificate to present to binlog server as authentication.
+ --binlog_ssl_key string PITR restore parameter: Filename containing mTLS client private key for use in binlog server authentication.
+- --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in -binlog_host).
+- --binlog_use_v3_resharding_mode (DEPRECATED) True if and only if the binlog streamer should use V3-style sharding, which doesn't require a preset sharding key column. (default true)
++ --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in --binlog_host).
+ --binlog_user string PITR restore parameter: username of binlog server.
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
++ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
++ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage (default ceph_backup_config.json)
+- --client-found-rows-pool-size int DEPRECATED: queryserver-config-transaction-cap will be used instead.
++ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
++ --compression-engine-name string compressor engine used for compression. (default "pargzip")
++ --compression-level int what level to pass to the compressor. (default 1)
++ --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152)
++ --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728)
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-config-allprivs-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-allprivs-flags uint deprecated: use db_flags
+- --db-config-allprivs-flavor string deprecated: use db_flavor
+- --db-config-allprivs-host string deprecated: use db_host
+- --db-config-allprivs-pass string db allprivs deprecated: use db_allprivs_password
+- --db-config-allprivs-port int deprecated: use db_port
+- --db-config-allprivs-server_name string deprecated: use db_server_name
+- --db-config-allprivs-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-allprivs-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-allprivs-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-allprivs-ssl-key string deprecated: use db_ssl_key
+- --db-config-allprivs-uname string deprecated: use db_allprivs_user (default vt_allprivs)
+- --db-config-allprivs-unixsocket string deprecated: use db_socket
+- --db-config-app-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-app-flags uint deprecated: use db_flags
+- --db-config-app-flavor string deprecated: use db_flavor
+- --db-config-app-host string deprecated: use db_host
+- --db-config-app-pass string db app deprecated: use db_app_password
+- --db-config-app-port int deprecated: use db_port
+- --db-config-app-server_name string deprecated: use db_server_name
+- --db-config-app-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-app-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-app-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-app-ssl-key string deprecated: use db_ssl_key
+- --db-config-app-uname string deprecated: use db_app_user (default vt_app)
+- --db-config-app-unixsocket string deprecated: use db_socket
+- --db-config-appdebug-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-appdebug-flags uint deprecated: use db_flags
+- --db-config-appdebug-flavor string deprecated: use db_flavor
+- --db-config-appdebug-host string deprecated: use db_host
+- --db-config-appdebug-pass string db appdebug deprecated: use db_appdebug_password
+- --db-config-appdebug-port int deprecated: use db_port
+- --db-config-appdebug-server_name string deprecated: use db_server_name
+- --db-config-appdebug-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-appdebug-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-appdebug-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-appdebug-ssl-key string deprecated: use db_ssl_key
+- --db-config-appdebug-uname string deprecated: use db_appdebug_user (default vt_appdebug)
+- --db-config-appdebug-unixsocket string deprecated: use db_socket
+- --db-config-dba-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-dba-flags uint deprecated: use db_flags
+- --db-config-dba-flavor string deprecated: use db_flavor
+- --db-config-dba-host string deprecated: use db_host
+- --db-config-dba-pass string db dba deprecated: use db_dba_password
+- --db-config-dba-port int deprecated: use db_port
+- --db-config-dba-server_name string deprecated: use db_server_name
+- --db-config-dba-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-dba-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-dba-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-dba-ssl-key string deprecated: use db_ssl_key
+- --db-config-dba-uname string deprecated: use db_dba_user (default vt_dba)
+- --db-config-dba-unixsocket string deprecated: use db_socket
+- --db-config-erepl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-erepl-dbname string deprecated: dbname does not need to be explicitly configured
+- --db-config-erepl-flags uint deprecated: use db_flags
+- --db-config-erepl-flavor string deprecated: use db_flavor
+- --db-config-erepl-host string deprecated: use db_host
+- --db-config-erepl-pass string db erepl deprecated: use db_erepl_password
+- --db-config-erepl-port int deprecated: use db_port
+- --db-config-erepl-server_name string deprecated: use db_server_name
+- --db-config-erepl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-erepl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-erepl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-erepl-ssl-key string deprecated: use db_ssl_key
+- --db-config-erepl-uname string deprecated: use db_erepl_user (default vt_erepl)
+- --db-config-erepl-unixsocket string deprecated: use db_socket
+- --db-config-filtered-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-filtered-flags uint deprecated: use db_flags
+- --db-config-filtered-flavor string deprecated: use db_flavor
+- --db-config-filtered-host string deprecated: use db_host
+- --db-config-filtered-pass string db filtered deprecated: use db_filtered_password
+- --db-config-filtered-port int deprecated: use db_port
+- --db-config-filtered-server_name string deprecated: use db_server_name
+- --db-config-filtered-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-filtered-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-filtered-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-filtered-ssl-key string deprecated: use db_ssl_key
+- --db-config-filtered-uname string deprecated: use db_filtered_user (default vt_filtered)
+- --db-config-filtered-unixsocket string deprecated: use db_socket
+- --db-config-repl-charset string deprecated: use db_charset (default utf8mb4)
+- --db-config-repl-flags uint deprecated: use db_flags
+- --db-config-repl-flavor string deprecated: use db_flavor
+- --db-config-repl-host string deprecated: use db_host
+- --db-config-repl-pass string db repl deprecated: use db_repl_password
+- --db-config-repl-port int deprecated: use db_port
+- --db-config-repl-server_name string deprecated: use db_server_name
+- --db-config-repl-ssl-ca string deprecated: use db_ssl_ca
+- --db-config-repl-ssl-ca-path string deprecated: use db_ssl_ca_path
+- --db-config-repl-ssl-cert string deprecated: use db_ssl_cert
+- --db-config-repl-ssl-key string deprecated: use db_ssl_key
+- --db-config-repl-uname string deprecated: use db_repl_user (default vt_repl)
+- --db-config-repl-unixsocket string deprecated: use db_socket
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default file)
++ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default approle)
++ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+@@ -143,84 +50,82 @@ Usage of vttablet:
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+ --db_allprivs_password string db allprivs password
+ --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
+- --db_allprivs_user string db allprivs user userKey (default vt_allprivs)
++ --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
+ --db_app_password string db app password
+ --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
+- --db_app_user string db app user userKey (default vt_app)
++ --db_app_user string db app user userKey (default "vt_app")
+ --db_appdebug_password string db appdebug password
+ --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
+- --db_appdebug_user string db appdebug user userKey (default vt_appdebug)
+- --db_charset string Character set used for this tablet. (default utf8mb4)
++ --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
++ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+- --db_dba_user string db dba user userKey (default vt_dba)
++ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_erepl_password string db erepl password
+ --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
+- --db_erepl_user string db erepl user userKey (default vt_erepl)
++ --db_erepl_user string db erepl user userKey (default "vt_erepl")
+ --db_filtered_password string db filtered password
+ --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
+- --db_filtered_user string db filtered user userKey (default vt_filtered)
++ --db_filtered_user string db filtered user userKey (default "vt_filtered")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor overrid. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+ --db_port int tcp port
+ --db_repl_password string db repl password
+ --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
+- --db_repl_user string db repl user userKey (default vt_repl)
++ --db_repl_user string db repl user userKey (default "vt_repl")
+ --db_server_name string server name of the DB we are connecting to.
+ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+ --db_ssl_ca string connection ssl ca
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+- --db_ssl_mode value SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
++ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+ --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
++ --disable-replication-manager Disable replication manager to prevent replication repairs.
+ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --discovery_high_replication_lag_minimum_serving duration the replication lag that is considered too high when applying the min_number_serving_vttablets threshold (default 2h0m0s)
+- --discovery_low_replication_lag duration the replication lag that is considered low enough to be healthy (default 30s)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable-autocommit This flag is deprecated. Autocommit is always allowed. (default true)
+ --enable-consolidator Synonym to -enable_consolidator (default true)
+ --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+ --enable-lag-throttler Synonym to -enable_lag_throttler
+- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
+ --enable-tx-throttler Synonym to -enable_tx_throttler
+ --enable_consolidator This option enables the query consolidator. (default true)
+ --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+ --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+ --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+ --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+- --enable_query_plan_field_caching This option fetches & caches fields (columns) when storing query plans (default true)
+ --enable_replication_reporter Use polling to track replication lag.
+- --enable_semi_sync Enable semi-sync when configuring replication, on primary and replica tablets only (rdonly tablets will not ack).
+ --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+ --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+ --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+ --enforce-tableacl-config if this flag is true, vttablet will fail to start if a valid tableacl config does not exist
+ --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+- --file_backup_storage_root string root directory for the file backup storage
++ --external-compressor string command with arguments to use when compressing a backup.
++ --external-compressor-extension string extension to use when using an external compressor.
++ --external-decompressor string command with arguments to use when decompressing a backup.
++ --file_backup_storage_root string Root directory for the file backup storage.
+ --filecustomrules string file based custom rule path
+ --filecustomrules_watch set up a watch on the target file and reload query rules when it changes
+ --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
+ --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
+- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups
+- --gcs_backup_storage_root string root prefix for all backup-related object names
++ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
++ --gcs_backup_storage_root string Root prefix for all backup-related object names.
+ --gh-ost-path string override default gh-ost binary full path
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
++ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+@@ -229,8 +134,8 @@ Usage of vttablet:
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+@@ -240,6 +145,7 @@ Usage of vttablet:
+ --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+ --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+ --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
++ -h, --help display usage and exit
+ --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+ --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+ --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+@@ -248,27 +154,22 @@ Usage of vttablet:
+ --init_populate_metadata (init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.
+ --init_shard string (init parameter) shard to use for this tablet
+ --init_tablet_type string (init parameter) the tablet type to use for this tablet.
+- --init_tags value (init parameter) comma separated list of key:value pairs used to tag the tablet
++ --init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet
+ --init_timeout duration (init parameter) timeout to use for the init phase. (default 1m0s)
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces_to_watch value Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --legacy_replication_lag_algorithm use the legacy algorithm when selecting the vttablets for serving (default true)
+ --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_queries Enable query logging to syslog.
+ --log_queries_to_file string Enable query logging to the specified file
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
++ --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256)
+ --migration_check_interval duration Interval between migration checks (default 1m0s)
+- --min_number_serving_vttablets int the minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving (default 2)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+ --mycnf-file string path to my.cnf, if reading all config params from there
+ --mycnf_bin_log_path string mysql binlog path
+ --mycnf_data_dir string data directory for mysql
+@@ -287,48 +188,32 @@ Usage of vttablet:
+ --mycnf_slow_log_path string mysql slow query log path
+ --mycnf_socket_file string mysql socket file
+ --mycnf_tmp_dir string mysql tmp directory
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default mysql_clear_password)
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+ --mysql_server_version string MySQL server version to advertise.
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default grpc)
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --opentsdb_uri string URI of opentsdb /api/put method
+- --orc_api_password string (Optional) Basic auth password to authenticate with Orchestrator's HTTP API.
+- --orc_api_url string Address of Orchestrator's HTTP API (e.g. http://host:port/api/). Leave empty to disable Orchestrator integration.
+- --orc_api_user string (Optional) Basic auth username to authenticate with Orchestrator's HTTP API. Leave empty to disable basic auth.
+- --orc_discover_interval duration How often to ping Orchestrator's HTTP API endpoint to tell it we exist. 0 means never.
+- --orc_timeout duration Timeout for calls to Orchestrator's HTTP API (default 30s)
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pitr_gtid_lookup_timeout duration PITR restore parameter: timeout for fetching gtid from timestamp. (default 1m0s)
+- --pool-name-prefix string Deprecated
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+- --pprof string enable profiling
++ --pprof strings enable profiling
+ --pt-osc-path string override default pt-online-schema-change binary full path
+ --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s)
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --query-log-stream-handler string URL handler for streaming queries log (default /debug/querylog)
++ --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
+ --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+- --querylog-format string format for query logs ("text" or "json") (default text)
++ --querylog-format string format for query logs ("text" or "json") (default "text")
+ --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+- --queryserver-config-allowunsafe-dmls deprecated
+ --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+ --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+ --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+- --queryserver-config-max-dml-rows int query server max dml rows per statement, maximum number of rows allowed to return at a time for an update or delete with either 1) an equality where clauses on primary keys, or 2) a subselect statement. For update and delete statements in above two categories, vttablet will split the original query into multiple small queries based on this configuration value.
+ --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+- --queryserver-config-message-conn-pool-prefill-parallelism int DEPRECATED: Unused.
+- --queryserver-config-message-conn-pool-size int DEPRECATED
+ --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
++ --queryserver-config-olap-transaction-timeout float query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30)
+ --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+- --queryserver-config-pool-prefill-parallelism int query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+ --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+ --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+ --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+@@ -340,18 +225,17 @@ Usage of vttablet:
+ --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+ --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+ --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+- --queryserver-config-stream-pool-prefill-parallelism int query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
+ --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+ --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+ --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+ --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+ --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+ --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+- --queryserver-config-transaction-prefill-parallelism int query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.
+ --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+ --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+ --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+ --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
++ --queryserver-enable-settings-pool Enable pooling of connections with modified system settings
+ --queryserver_enable_online_ddl Enable online DDL. (default true)
+ --redact-debug-ui-queries redact full queries and bind variables from debug UI
+ --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+@@ -362,18 +246,18 @@ Usage of vttablet:
+ --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
+ --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
+ --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
+- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided)
+- --s3_backup_aws_region string AWS region to use (default us-east-1)
+- --s3_backup_aws_retries int AWS request retries (default -1)
+- --s3_backup_force_path_style force the s3 path style
+- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors (default LogOff)
+- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file)
+- --s3_backup_storage_bucket string S3 bucket to use for backups
+- --s3_backup_storage_root string root prefix for all backup-related object names
+- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections
++ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
++ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
++ --s3_backup_aws_retries int AWS request retries. (default -1)
++ --s3_backup_force_path_style force the s3 path style.
++ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
++ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
++ --s3_backup_storage_bucket string S3 bucket to use for backups.
++ --s3_backup_storage_root string root prefix for all backup-related object names.
++ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
+ --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+ --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s)
+ --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+@@ -384,20 +268,19 @@ Usage of vttablet:
+ --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
++ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --statsd_address string Address for statsd client
+- --statsd_sample_rate float (default 1)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
++ --statsd_sample_rate float Sample rate for statsd metrics (default 1)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
+ --table-acl-config string path to table access checker config file; send SIGHUP to reload this file
+ --table-acl-config-reload-interval duration Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload
+- --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default hold,purge,evac,drop)
++ --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default "hold,purge,evac,drop")
+ --tablet-path string tablet alias
+ --tablet_config string YAML file config for tablet
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_filters value Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+@@ -411,18 +294,15 @@ Usage of vttablet:
+ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_manager_grpc_key string the key to use to connect
+ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default grpc)
+- --tablet_protocol string how to talk to the vttablets (default grpc)
+- --tablet_refresh_interval duration tablet refresh interval (default 1m0s)
+- --tablet_refresh_known_tablets tablet refresh reloads the tablet address/port map from topo in case it changes (default true)
+- --tablet_url_template string format string describing debug tablet url formatting. See the Go code for getTabletDebugURL() how to customize this. (default http://{{.GetTabletHostPort}})
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
+ --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+ --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+ --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default replica)
++ --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica")
+ --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default serfHealth)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+@@ -435,63 +315,37 @@ Usage of vttablet:
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+- --topo_read_concurrency int concurrent topo reads (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --topocustomrule_cell string topo cell for customrules file. (default global)
++ --topocustomrule_cell string topo cell for customrules file. (default "global")
+ --topocustomrule_path string path for customrules file. Disabled if empty.
+- --tracer string tracing service to use (default noop)
++ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
++ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
++ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
+ --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+- --transaction-log-stream-handler string URL handler for streaming transactions log (default /debug/txlog)
++ --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog")
+ --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+- --transaction_shutdown_grace_period float DEPRECATED: use shutdown_grace_period instead.
+ --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+ --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+ --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+- --tx-throttler-config string Synonym to -tx_throttler_config (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx-throttler-healthcheck-cells value Synonym to -tx_throttler_healthcheck_cells
+- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default target_replication_lag_sec: 2
+-max_replication_lag_sec: 10
+-initial_rate: 100
+-max_increase: 1
+-emergency_decrease: 0.5
+-min_duration_between_increases_sec: 40
+-max_duration_between_increases_sec: 62
+-min_duration_between_decreases_sec: 20
+-spread_backlog_across_sec: 20
+-age_bad_rate_after_sec: 180
+-bad_rate_increase: 0.1
+-max_rate_approach_threshold: 0.9
+-)
+- --tx_throttler_healthcheck_cells value A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
++ --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
++ --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells
++ --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
++ --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+ --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+ --use_super_read_only Set super_read_only flag when performing planned failover.
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+ --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+ --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+@@ -500,23 +354,24 @@ max_rate_approach_threshold: 0.9
+ --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s)
+ --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+ --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1)
+- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence (default 15m0s)
++ --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence
+ --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+ --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+ --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+- --vreplication_tablet_type string comma separated list of tablet types used as a source (default in_order:REPLICA,PRIMARY)
++ --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY")
++ --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864)
+ --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+ --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+ --vtctld_addr string address of a vtctld instance
+- --vtgate_protocol string how to talk to vtgate (default grpc)
+- --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with -init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default /.*/)
++ --vtgate_protocol string how to talk to vtgate (default "grpc")
++ --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/")
+ --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear
+ --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default tar)
++ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
++ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
++ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
++ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
++ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
+ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vttestserver.diff b/doc/flags/14.0-to-15.0-transition/vttestserver.diff
new file mode 100644
index 00000000000..5d18d2e83e0
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vttestserver.diff
@@ -0,0 +1,332 @@
+diff --git a/flags/14.0/vttestserver.txt b/flags/15.0/vttestserver.txt
+index 755eba1..d30ab35 100644
+--- a/flags/14.0/vttestserver.txt
++++ b/flags/15.0/vttestserver.txt
+@@ -1,184 +1,144 @@
+ Usage of vttestserver:
+- --alsologtostderr log to standard error as well as files
+- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+- --app_pool_size int Size of the connection pool for app connections (default 40)
+- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
+- --backup_storage_implementation string which implementation to use for the backup storage feature
+- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression (default 2)
+- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup (default 10m0s)
+- --builtinbackup_progress duration how often to send progress updates when backing up large files (default 5s)
+- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+- --cells string Comma separated list of cells (default "test")
+- --charset string MySQL charset (default "utf8mb4")
+- --consul_auth_static_file string JSON File to read the topos/tokens from.
+- --cpu_profile string deprecated: use '-pprof=cpu' instead
+- --data_dir string Directory where the data files will be placed, defaults to a random directory under /vt/vtdataroot
+- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+- --db-credentials-file string db credentials file; send SIGHUP to reload this file
+- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+- --db-credentials-vault-addr string URL to Vault server
+- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+- --dba_pool_size int Size of the connection pool for dba connections (default 20)
+- --default_schema_dir string Default directory for initial schema files. If no schema is found in schema_dir, default to this location.
+- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+- --emit_stats If set, emit stats to push-based monitoring and stats backends
+- --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+- --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+- --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+- --external_topo_global_root string the path of the global topology data in the global topology server for vtcombo process
+- --external_topo_global_server_address string the address of the global topology server for vtcombo process
+- --external_topo_implementation string the topology implementation to use for vtcombo process
+- --extra_my_cnf string extra files to add to the config, separated by ':'
+- --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
+- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+- --grpc_auth_static_client_creds string when using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server
+- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+- --grpc_enable_tracing Enable GRPC tracing
+- --grpc_initial_conn_window_size int gRPC initial connection window size
+- --grpc_initial_window_size int gRPC initial window size
+- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+- --grpc_port int Port to listen on for gRPC calls
+- --grpc_prometheus Enable gRPC monitoring with Prometheus
+- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+- --grpc_server_initial_window_size int gRPC server initial window size
+- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+- --initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags.
+- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --keyspaces string Comma separated list of keyspaces (default "test_keyspace")
+- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_err_stacks log stack traces for errors
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --master_connect_retry duration Deprecated, use -replication_connect_retry (default 10s)
+- --max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000)
+- --mem-profile-rate int deprecated: use '-pprof=mem' instead (default 524288)
+- --min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000)
+- --mutex-profile-fraction int deprecated: use '-pprof=mutex' instead
+- --mysql_auth_server_static_file string JSON File to read the users/passwords from.
+- --mysql_auth_server_static_string string JSON representation of the users/passwords config.
+- --mysql_auth_static_reload_interval duration Ticker to reload credentials
+- --mysql_bind_host string which host to bind vtgate mysql listener to (default "localhost")
+- --mysql_clientcert_auth_method string client-side authentication method to use. Supported values: mysql_clear_password, dialog. (default "mysql_clear_password")
+- --mysql_only If this flag is set only mysql is initialized. The rest of the vitess components are not started. Also, the output specifies the mysql unix socket instead of the vtgate port.
+- --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms)
+- --mysql_server_version string MySQL server version to advertise.
+- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
+- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+- --null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1)
+- --num_shards string Comma separated shard count (one per keyspace) (default "2")
+- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+- --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
+- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+- --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.
+- --planner_version string planner_version is deprecated. Please use planner-version instead
+- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+- --port int Port to use for vtcombo. If this is 0, a random port will be chosen.
+- --pprof string enable profiling
+- --proto_topo string Define the fake cluster topology as a compact text format encoded vttest proto. See vttest.proto for more information.
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value
+- --rdonly_count int Rdonly tablets per shard (default 1)
+- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+- --replica_count int Replica tablets per shard (includes primary) (default 2)
+- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+- --rng_seed int The random number generator seed to use when initializing with random data (see also --initialize_with_random_data). Multiple runs with the same seed will result with the same initial data. (default 123)
+- --schema_dir string Directory for initial schema files. Within this dir, there should be a subdir for each keyspace. Within each keyspace dir, each file is executed as SQL after the database is created on each shard. If the directory contains a vschema.json file, it will be used as the vschema for the V3 API.
+- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+- --service_map value comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-vtworker
+- --snapshot_file string A MySQL DB snapshot file
+- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+- --stats_backend string The name of the registered push-based monitoring/stats backend to use
+- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+- --stats_common_tags string Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+- --stats_drop_variables string Variables to be dropped from the list of exported variables.
+- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+- --tablet_hostname string The hostname to use for the tablet otherwise it will be derived from OS' hostname (default "localhost")
+- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+- --tablet_manager_grpc_cert string the cert to use to connect
+- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+- --tablet_manager_grpc_key string the key to use to connect
+- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+- --tablet_manager_protocol string the protocol to use to talk to vttablet (default "grpc")
+- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+- --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+- --topo_consul_lock_session_ttl string TTL for consul session.
+- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+- --topo_global_root string the path of the global topology data in the global topology server
+- --topo_global_server_address string the address of the global topology server
+- --topo_implementation string the topology implementation to use
+- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+- --tracer string tracing service to use (default "noop")
+- --tracing-enable-logging whether to enable logging in the tracing service
+- --tracing-sampling-rate value sampling rate for the probabilistic jaeger sampler (default 0.1)
+- --tracing-sampling-type value sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default const)
+- --transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC (default "MULTI")
+- --v value log level for V logs
+- --version print binary version
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate
+- --vtctl_client_protocol string the protocol to use to talk to the vtctl server (default "grpc")
+- --vtctld_grpc_ca string the server ca to use to validate servers when connecting
+- --vtctld_grpc_cert string the cert to use to connect
+- --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtctld_grpc_key string the key to use to connect
+- --vtctld_grpc_server_name string the server name to use to validate server certificate
+- --vtgate_grpc_ca string the server ca to use to validate servers when connecting
+- --vtgate_grpc_cert string the cert to use to connect
+- --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
+- --vtgate_grpc_key string the key to use to connect
+- --vtgate_grpc_server_name string the server name to use to validate server certificate
+- --vtgate_protocol string how to talk to vtgate (default "grpc")
+- --workflow_manager_init Enable workflow manager
+- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
+- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
+- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
++ --alsologtostderr log to standard error as well as files
++ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
++ --app_pool_size int Size of the connection pool for app connections (default 40)
++ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
++ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
++ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
++ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
++ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
++ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
++ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
++ --cells strings Comma separated list of cells (default [test])
++ --charset string MySQL charset (default "utf8mb4")
++ --compression-engine-name string compressor engine used for compression. (default "pargzip")
++ --compression-level int what level to pass to the compressor. (default 1)
++ --consul_auth_static_file string JSON File to read the topos/tokens from.
++ --data_dir string Directory where the data files will be placed, defaults to a random directory under /vt/vtdataroot
++ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
++ --dba_pool_size int Size of the connection pool for dba connections (default 20)
++ --default_schema_dir string Default directory for initial schema files. If no schema is found in schema_dir, default to this location.
++ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
++ --enable_direct_ddl Allow users to submit direct DDL statements (default true)
++ --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
++ --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
++ --external-compressor string command with arguments to use when compressing a backup.
++ --external-compressor-extension string extension to use when using an external compressor.
++ --external-decompressor string command with arguments to use when decompressing a backup.
++ --external_topo_global_root string the path of the global topology data in the global topology server for vtcombo process
++ --external_topo_global_server_address string the address of the global topology server for vtcombo process
++ --external_topo_implementation string the topology implementation to use for vtcombo process
++ --extra_my_cnf string extra files to add to the config, separated by ':'
++ --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
++ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
++ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
++ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
++ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
++ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
++ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
++ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
++ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
++ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
++ --grpc_enable_tracing Enable gRPC tracing.
++ --grpc_initial_conn_window_size int gRPC initial connection window size
++ --grpc_initial_window_size int gRPC initial window size
++ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
++ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
++ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
++ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
++ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
++ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
++ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
++ --grpc_prometheus Enable gRPC monitoring with Prometheus.
++ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
++ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
++ --grpc_server_initial_window_size int gRPC server initial window size
++ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
++ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
++ -h, --help display usage and exit
++ --initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags.
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --keyspaces strings Comma separated list of keyspaces (default [test_keyspace])
++ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000)
++ --min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000)
++ --mysql_bind_host string which host to bind vtgate mysql listener to (default "localhost")
++ --mysql_only If this flag is set only mysql is initialized. The rest of the vitess components are not started. Also, the output specifies the mysql unix socket instead of the vtgate port.
++ --mysql_server_version string MySQL server version to advertise.
++ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
++ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
++ --null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1)
++ --num_shards strings Comma separated shard count (one per keyspace) (default [2])
++ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
++ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
++ --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
++ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
++ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.
++ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
++ --port int Port to use for vtcombo. If this is 0, a random port will be chosen.
++ --pprof strings enable profiling
++ --proto_topo string Define the fake cluster topology as a compact text format encoded vttest proto. See vttest.proto for more information.
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value
++ --rdonly_count int Rdonly tablets per shard (default 1)
++ --replica_count int Replica tablets per shard (includes primary) (default 2)
++ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
++ --rng_seed int The random number generator seed to use when initializing with random data (see also --initialize_with_random_data). Multiple runs with the same seed will result with the same initial data. (default 123)
++ --schema_dir string Directory for initial schema files. Within this dir, there should be a subdir for each keyspace. Within each keyspace dir, each file is executed as SQL after the database is created on each shard. If the directory contains a vschema.json file, it will be used as the vschema for the V3 API.
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
++ --snapshot_file string A MySQL DB snapshot file
++ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
++ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
++ --tablet_hostname string The hostname to use for the tablet otherwise it will be derived from OS' hostname (default "localhost")
++ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
++ --tablet_manager_grpc_cert string the cert to use to connect
++ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
++ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
++ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
++ --tablet_manager_grpc_key string the key to use to connect
++ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
++ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
++ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
++ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
++ --topo_consul_lock_session_ttl string TTL for consul session.
++ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
++ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
++ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
++ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
++ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
++ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
++ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
++ --transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC (default "MULTI")
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate
++ --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
++ --vtctld_grpc_ca string the server ca to use to validate servers when connecting
++ --vtctld_grpc_cert string the cert to use to connect
++ --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtctld_grpc_key string the key to use to connect
++ --vtctld_grpc_server_name string the server name to use to validate server certificate
++ --vtgate_grpc_ca string the server ca to use to validate servers when connecting
++ --vtgate_grpc_cert string the cert to use to connect
++ --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
++ --vtgate_grpc_key string the key to use to connect
++ --vtgate_grpc_server_name string the server name to use to validate server certificate
++ --workflow_manager_init Enable workflow manager
++ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
++ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
++ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
++ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
++ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
++ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
++ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
++ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/doc/flags/14.0-to-15.0-transition/vttlstest.diff b/doc/flags/14.0-to-15.0-transition/vttlstest.diff
new file mode 100644
index 00000000000..d0c86525b9a
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/vttlstest.diff
@@ -0,0 +1,37 @@
+diff --git a/flags/14.0/vttlstest.txt b/flags/15.0/vttlstest.txt
+index 87321df..e149cf2 100644
+--- a/flags/14.0/vttlstest.txt
++++ b/flags/15.0/vttlstest.txt
+@@ -1,13 +1,19 @@
+-Usage of vttlstest:
+- --alsologtostderr log to standard error as well as files
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --root string root directory for certificates and keys (default ".")
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --v value log level for V logs
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
++vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.
++
++Usage:
++ vttlstest [command]
++
++Available Commands:
++ CreateCA Create certificate authority
++ CreateCRL Create certificate revocation list
++ CreateIntermediateCA Create intermediate certificate authority
++ CreateSignedCert Create signed certificate
++ RevokeCert Revoke a certificate
++ completion Generate the autocompletion script for the specified shell
++ help Help about any command
++
++Flags:
++ -h, --help help for vttlstest
++ --root string root directory for all artifacts (default ".")
++
++Use "vttlstest [command] --help" for more information about a command.
diff --git a/doc/flags/14.0-to-15.0-transition/zk.diff b/doc/flags/14.0-to-15.0-transition/zk.diff
new file mode 100644
index 00000000000..9fb66007217
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/zk.diff
@@ -0,0 +1,14 @@
+diff --git a/flags/14.0/zk.txt b/flags/15.0/zk.txt
+new file mode 100644
+index 0000000..443bf0b
+--- /dev/null
++++ b/flags/15.0/zk.txt
+@@ -0,0 +1,8 @@
++Usage of zk:
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --server string server(s) to connect to
diff --git a/doc/flags/14.0-to-15.0-transition/zkctl.diff b/doc/flags/14.0-to-15.0-transition/zkctl.diff
new file mode 100644
index 00000000000..4567181a701
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/zkctl.diff
@@ -0,0 +1,24 @@
+diff --git a/flags/14.0/zkctl.txt b/flags/15.0/zkctl.txt
+new file mode 100644
+index 0000000..e7e41c4
+--- /dev/null
++++ b/flags/15.0/zkctl.txt
+@@ -0,0 +1,18 @@
++Usage of zkctl:
++ --alsologtostderr log to standard error as well as files
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
++ --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/doc/flags/14.0-to-15.0-transition/zkctld.diff b/doc/flags/14.0-to-15.0-transition/zkctld.diff
new file mode 100644
index 00000000000..89576d5b422
--- /dev/null
+++ b/doc/flags/14.0-to-15.0-transition/zkctld.diff
@@ -0,0 +1,37 @@
+diff --git a/flags/14.0/zkctld.txt b/flags/15.0/zkctld.txt
+index 5bad4f2..6ec026b 100644
+--- a/flags/14.0/zkctld.txt
++++ b/flags/15.0/zkctld.txt
+@@ -1,14 +1,19 @@
+ Usage of zkctld:
+- --alsologtostderr log to standard error as well as files
+- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+- --log_backtrace_at value when logging hits line file:N, emit a stack trace
+- --log_dir string If non-empty, write log files in this directory
+- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+- --logtostderr log to standard error instead of files
+- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+- --stderrthreshold value logs at or above this threshold go to stderr (default 1)
+- --v value log level for V logs
+- --vmodule value comma-separated list of pattern=N settings for file-filtered logging
+- --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default 6@:3801:3802:3803)
+- --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
++ --alsologtostderr log to standard error as well as files
++ -h, --help display usage and exit
++ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
++ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
++ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
++ --log_dir string If non-empty, write log files in this directory
++ --log_err_stacks log stack traces for errors
++ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
++ --logtostderr log to standard error instead of files
++ --pprof strings enable profiling
++ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
++ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
++ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
++ --v Level log level for V logs
++ -v, --version print binary version
++ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
++ --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
++ --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/ReleaseInstructions.md
index 014487568cc..20e0b97dfb3 100644
--- a/doc/internal/ReleaseInstructions.md
+++ b/doc/internal/ReleaseInstructions.md
@@ -55,7 +55,7 @@ Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`.
## Release Branches
Each major and minor releases (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named
-`release-X.Y`. This branch should diverge from `main` when the code freeze when the release
+`release-X.Y`. This branch should diverge from `main` when the release
is declared, after which point only bugfix PRs should be cherry-picked onto the branch.
All other activity on `main` will go out with a subsequent major or minor release.
@@ -109,6 +109,7 @@ Therefore, file a JIRA ticket with Sonatype to get added ([example for a differe
Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html).
Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`.
+For Mac, you need to install `gnupg` via `brew install gnupg`.
#### Login configuration
@@ -118,9 +119,17 @@ Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [i
## Release Cutover
-In this section we describe our current release process. We begin with a short [**overview**](#overview).
+In this section we describe our current release process. We begin with a list of [**pre-requisites for the release team**](#pre-requisites) and a short [**overview**](#overview).
The release process is divided into three parts: [**Pre-Release**](#pre-release), [**Release**](#release), [**Post-Release**](#post-release), which are detailed after the overview.
+### Pre-Requisites
+
+This section highlights the different pre-requisites the release team has to meet before releasing.
+
+- The tool `gh` must be installed locally and ready to be used.
+- You must have access to the Java release, more information in the [**Java Packages**](#java-packages) section.
+- You must be able to create branches and have admin rights on the `vitessio/vitess` and `planetscale/vitess-operator` repositories.
+
### Overview
#### Schedule
@@ -131,7 +140,8 @@ We usually create the RC1 during the first week of the month, and the GA version
#### Code Freeze
Before creating RC1, there is a code freeze. Assuming the release of RC1 happens on a Tuesday, the release branch will be frozen Friday of the previous week.
-This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches. However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday)
+This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches.
+However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday)
Regarding patch releases, no code freeze is planned.
#### Tracking Issue for each Release
@@ -151,8 +161,9 @@ That includes:
> - This includes write access to the Vitess repository and to the Maven repository.
- **Preparing and cleaning the release notes summary.**
> - One or more Pull Requests have to be submitted in advance to create and update the release summary.
- > - The summary files are located in: `./doc/releasenotes/*_*_*_summary.md`.
+ > - The summary files are located in: `./changelog/*.0/*.*.*/summary.md`.
> - The summary file for a release candidate is the same as the one for the GA release.
+ > - Make sure to run `go run ./go/tools/releases/releases.go` to update the `changelog` directory with the latest release notes.
- **Finishing the blog post, and coordinating with the different organizations for cross-posting. Usually CNCF and PlanetScale. This step applies only for GA releases.**
> - The blog post must be finished and reviewed.
> - A Pull Request on the website repository of Vitess has to be created so we can easily publish the blog during the release day.
@@ -160,17 +171,29 @@ That includes:
> - As soon as we go into code freeze, if we are doing an RC, create the release branch.
> - If we are doing a GA release, do not merge any new Pull Requests.
> - The guide on how to do a code freeze is available in the [How To Code Freeze](#how-to-code-freeze) section.
+ > - It is not advised to merge a PR during code freeze, but if it is deemed necessary by the release lead, then follow the steps in [How To Merge During Code Freeze](#how-to-merge-during-code-freeze) section.
+- **Create the Vitess release.**
+ > - A guide on how to create a Vitess release is available in the [How to prepare the release of Vitess](#how-to-prepare-the-release-of-vitess) section.
+ > - This step will create a Release Pull Request, it must be reviewed and merged before the release day. The release commit will be used to tag the release.
- **Preparing the Vitess Operator release.**
> - While the Vitess Operator is located in a different repository, we also need to do a release for it.
> - The Operator follows the same cycle: RC1 -> GA -> Patches.
> - Documentation for the pre-release of the Vitess Operator is available [here](https://github.com/planetscale/vitess-operator/blob/main/docs/release-process.md#prepare-for-release).
+- **Update the release notes on `main`.**
+ > - One Pull Request against `main` must be created, it will contain the new release notes that we are adding in the Release Pull Request.
+ > - We open this Pull Request now to avoid waiting on the CI during release day.
+ > - All future changes to the release notes during the code freeze will need to be ported to both PRs: the one on `main` and the Release Pull Request.
### Release
On the release day, there are several things to do:
-- **Create the Vitess release.**
- > - A guide on how to create a Vitess release is available in the [How To Release Vitess](#how-to-release-vitess) section.
+- **Merge the Release Pull Request.**
+ > - During the code freeze, we created a Release Pull Request. It must be merged.
+- **Tag the Vitess release.**
+ > - A guide on how to tag a version is available in the [How To Release Vitess](#how-to-release-vitess) section.
+- **Update the release notes on `main`.**
+ > - During the code freeze, we created a Pull Request against `main` to update the release notes. It must be merged.
- **Create the corresponding Vitess operator release.**
> - Applies only to versions greater or equal to `v14.0.0`.
> - If we are doing an RC release, then we will need to create the Vitess Operator RC too. If we are doing a GA release, we're also doing a GA release in the Operator.
@@ -192,16 +215,72 @@ On the release day, there are several things to do:
> - After a while, those elements will finish their execution and their status will be green.
> - This step is even more important for GA releases as we often include a link to _arewefastyet_ in the blog post.
> - The benchmarks need to complete before announcing the blog posts or before they get cross-posted.
-- **Update the release notes on the release branch and on `main`.**
- > - Two new Pull Requests have to be created.
- > - One against `main`, it will contain only the new release notes.
- > - And another against the release branch, this one contains the release notes and the release commit. (The commit on which we did `git tag`)
+- **Go back to dev mode on the release branch.**
+ > - The version constants across the codebase must be updated to `SNAPSHOT`.
+- **Build k8s Docker images and publish them**
+ > - The docker image for `base`, `lite`, etc are built automatically by DockerHub. The k8s images however are dependent on these images and are required to be built manually.
+ > - These images should be built after the `base` image has been built and available on DockerHub.
+ > - To build and publish these images, run `./release.sh` from the directory `vitess/docker`.
### Post-Release
Once the release is over, we need to announce it on both Slack and Twitter. We also want to make sure the blog post was cross-posted, if applicable.
We need to verify that _arewefastyet_ has finished the benchmark too.
+### How to prepare the release of Vitess
+
+> In this example our current version is `v14.0.3` and we release the version `v15.0.0`.
+> Alongside Vitess' release, we also release a new version of the operator.
+> Since we are releasing a release candidate here, the new version of the operator will also be a release candidate.
+> In this example, the new operator version is `2.8.0`.
+>
+> It is important to note that before the RC, there is a code freeze during which we create the release branch.
+>
+> The release branch in this example is `release-15.0`.
+>
+> The example also assumes that `origin` is the `vitessio/vitess` remote.
+
+1. Fetch `github.com/vitessio/vitess`'s remote.
+ ```shell
+ git fetch origin
+ ```
+
+2. Creation of the Release Pull Request.
+ > This step will create the Release Pull Request that will then be reviewed ahead of the release day.
+ > The merge commit of that Pull Request will be used during the release day to tag the release.
+ 1. Run the `create_release` script using the Makefile:
+ 1. Release Candidate:
+ ```shell
+ make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" VTOP_VERSION="2.8.0-rc1" create_release
+ ```
+ 2. General Availability:
+ ```shell
+ make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0" VTOP_VERSION="2.8.0" create_release
+ ```
+
+ The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step.
+
+ 2. Run the following command to generate the release notes:
+ 1. Release Candidate:
+ ```shell
+ go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0-rc1" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]]
+ ```
+ 2. General Availability:
+ ```shell
+ go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]]
+ ```
+
+ > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit.
+ In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default).
+
+ This command will generate the release notes by looking at all the commits between the tag `v14.0.3` and the reference `HEAD`.
+ It will also use the file located in `./changelog/15.0/15.0.0/summary.md` to prefix the release notes with a text that the maintainers wrote before the release.
+ Please verify the generated release notes to make sure it is well-formatted and all the bookmarks are generated properly.
+
+
+3. Follow the instruction prompted by the `create_release` Makefile command's output in order to push the newly created branch and create the Release Pull Request on GitHub.
+
+4. If we are doing an RC release it means we created a new branch from `main`. We need to update `main` with the next SNAPSHOT version. If `main` was on `15.0.0-SNAPSHOT`, we need to update it to `16.0.0-SNAPSHOT`. A simple find and replace in the IDE is sufficient, as there are only a handful of files that must be changed: `version.go` and several java files.
### How To Release Vitess
This section is divided into two parts:
@@ -210,7 +289,10 @@ This section is divided into two parts:
#### Creation of the tags and release notes
-> In this example our current version is `v14` and we release the version `v15.0.0`.
+> This step implies that you have created a [Release Pull Request](#how-to-prepare-the-release-of-vitess) beforehand and that it has been reviewed.
+> The merge commit of this Release Pull Request will be used to tag the release.
+>
+> In this example our current version is `v14.0.3` and we release the version `v15.0.0`.
> Alongside Vitess' release, we also release a new version of the operator.
> Since we are releasing a release candidate here, the new version of the operator will also be a release candidate.
> In this example, the new operator version is `2.8.0`.
@@ -226,37 +308,22 @@ This section is divided into two parts:
git fetch origin
```
-2. Creation of the release notes and tags.
- 1. Run the release script using the Makefile:
- 1. Release Candidate:
- ```shell
- make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" VTOP_VERSION="2.8.0-rc1" do_release
- ```
- 2. General Availability:
- ```shell
- make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0" DEV_VERSION="15.0.1-SNAPSHOT" VTOP_VERSION="2.8.0" do_release
- ```
+2. Checkout to the merge commit of the Release Pull Request.
- The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step.
+3. Tag the release and push the tags
+ ```shell
+ git tag v15.0.0 && git tag v0.15.0 && git push origin v15.0.0 && git push origin v0.15.0
+ ```
- 2. Run the following command to generate the release notes:
- 1. Release Candidate:
- ```shell
- make VERSION="v15.0.0-rc1" FROM="v14.0.0" TO="HEAD" SUMMARY="./doc/releasenotes/15_0_0_summary.md" release-notes
- ```
- 2. General Availability:
- ```shell
- make VERSION="v15.0.0-rc1" FROM="v14.0.0" TO="HEAD" SUMMARY="./doc/releasenotes/15_0_0_summary.md" release-notes
- ```
- This command will generate the release notes by looking at all the commits between the tag `v14.0.0` and the reference `HEAD`.
- It will also use the file located in `./doc/releasenotes/15_0_0_summary.md` to prefix the release notes with a text that the maintainers wrote before the release.
+4. Create a Pull Request against the `main` branch with the release notes found in `./changelog/15.0/15.0.0/15_0_0_*.md`.
-
-3. Follow the instruction prompted by the `do_release` Makefile command's output in order to push the tags, branches and create the Pull Requests.
-
-4. Create a Pull Request against the `main` branch with the newly created release notes.
-
-5. Release the tag on GitHub UI as explained in the following section.
+5. Run the back to dev mode tool.
+ ```shell
+ make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" back_to_dev_mode
+ ```
+ > You will then need to follow the instructions given by the output of the back_to_dev_mode Makefile command. You will need to push the newly created branch and open a Pull Request.
+
+6. Release the tag on GitHub UI as explained in the following section.
#### Creating Release or Release Candidate on the GitHub UI
@@ -301,6 +368,9 @@ git fetch --all
git checkout -b release-15.0 origin/main
```
+> Important: after creating the new branch `release-15.0`, we need to create new branch protection rules on the GitHub UI.
+> The rules can be copied from the rules that are on the `main` branch.
+
The new branch will be based on `origin/main`, here `origin` points to `vitessio/vitess`. If we are not doing a release candidate, then the branch already exists and we can checkout on it.
Now, if we are doing a GA release, let's update the branch:
@@ -317,6 +387,26 @@ Finally, let's run the code freeze script:
The script will prompt the command that will allow you to push the code freeze change. Once pushed, open a PR that will be merged on `release-15.0`.
+Remember, you should also disable the Launchable integration from the newly created release branch.
+
+### How To Merge During Code Freeze
+
+> **Warning:** It is not advised to merge a PR during code-freeze. If it is deemed absolutely necessary, then the following steps can be followed.
+
+The PR that needs to be merged will be failing on the `Code Freeze` CI. To merge this PR, we'll have to mark this CI action as not required.
+You will need administrator privileges on the vitess repository to be able to make this change.
+
+1. Go to the GitHub repository and click on `Settings`.
+2. Under the `Code and automation` section, select `Branches`.
+3. Find the branch that you want to merge the PR against and then select `Edit`.
+4. Scroll down to find the list of required checks.
+5. Within this list find `Code Freeze` and click on the cross next to it to remove it from this list.
+6. Save your changes on the bottom of the page.
+7. Refresh the page of the PR, and you should be able to merge it.
+8. After merging the PR, you need to do two more things:
+ 1. Add `Code Freeze` back as a required check.
+ 2. Check if the release PR has any merge conflicts. If it does, fix them and push.
+
### Java Packages: Deploy & Release
> **Warning:** This section's steps need to be executed only when releasing a new major version of Vitess,
@@ -324,12 +414,12 @@ The script will prompt the command that will allow you to push the code freeze c
>
> For this example, we assume we juste released `v12.0.0`.
-1. Checkout to the release commit.
+1. Checkout to the release commit.
```shell
git checkout v12.0.0
```
-2. Run `gpg-agent` to avoid that Maven will constantly prompt you for the password of your private key.
+2. Run `gpg-agent` to avoid that Maven will constantly prompt you for the password of your private key. Note that this can print error messages that can be ignored on Mac.
```bash
eval $(gpg-agent --daemon --no-grab --write-env-file $HOME/.gpg-agent-info)
@@ -337,11 +427,20 @@ The script will prompt the command that will allow you to push the code freeze c
export GPG_AGENT_INFO
```
-3. Deploy (upload) the Java code to the oss.sonatype.org repository:
+3. Export following to avoid any version conflicts
+ ```bash
+ export MAVEN_OPTS="--add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.text=ALL-UNNAMED
+ --add-opens=java.desktop/java.awt.font=ALL-UNNAMED"
+ ```
+
+4. Deploy (upload) the Java code to the oss.sonatype.org repository:
> **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).
```bash
+ cd ./java/
mvn clean deploy -P release -DskipTests
cd ..
```
+
+5. It will take some time for artifacts to appear on [maven directory](https://mvnrepository.com/artifact/io.vitess/vitess-client)
diff --git a/doc/releasenotes/15_0_0_changelog.md b/doc/releasenotes/15_0_0_changelog.md
deleted file mode 100644
index 6b064865219..00000000000
--- a/doc/releasenotes/15_0_0_changelog.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Changelog of Vitess v15.0.0
-
-### Bug fixes
-#### Query Serving
- * fix: scalar aggregation engine primitive #10465
- * fix: aggregation empty row on join with grouping and aggregations #10480
-### CI/Build
-#### Governance
- * Update the comment for review checklist with an item for CI workflows #10471
-### Documentation
-#### CLI
- * [vtctldclient] Update CLI docs for usages, flags, and aliases #10502
-#### VTAdmin
- * [vtadmin] Document known issue with node versions 17+ #10483
-### Enhancement
-#### Build/CI
- * Add name to static check workflow #10470
-#### Query Serving
- * Refactor aggregation AST structs #10347
- * fix: change planner_version to planner-version everywhere #10453
- * Add support for alter table rename column #10469
- * schemadiff: `ColumnRenameStrategy` in DiffHints #10472
- * Add parsing support for performance schema functions #10478
- * schemadiff: TableRenameStrategy in DiffHints #10479
- * OnlineDDL executor: adding log entries #10482
-### Internal Cleanup
-#### General
- * Remove v2 resharding fields #10409
-#### Query Serving
- * Reduce shift-reduce conflicts #10500
- * feat: don't stop if compilation errors are happening on the generated files #10506
-#### VTAdmin
- * [vtadmin] Rename ERS/PRS pools+flags properly #10460
-#### web UI
- * Remove sharding_column_name and sharding_column_type from vtctld2 #10459
-### Release
-#### General
- * Post release `v14.0.0-RC1` steps #10458
-### Testing
-#### Build/CI
- * test: reduce number of vttablets to start in the tests #10491
-#### VTAdmin
- * [vtadmin] authz tests - tablet actions #10457
- * [vtadmin] Add authz tests for remaining non-schema related actions #10481
- * [vtadmin] Add schema-related authz tests #10486
-
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 0e0faff7974..8e045be276c 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -12,17 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# NOTE: This file is also symlinked as "Dockerfile" in the root of our
-# repository because the automated build feature on Docker Hub does not
-# allow to specify a different build context. It always assumes that the
-# build context is the same directory as the Dockerfile is in.
-# "make build" below must be called in our repository's root and
-# therefore we need to have the symlinked "Dockerfile" in there as well.
-# TODO(mberlin): Remove the symlink and this note once
-# https://github.com/docker/hub-feedback/issues/292 is fixed.
-
-ARG bootstrap_version=11
-ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
+ARG bootstrap_version=21
+ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}"
@@ -32,6 +23,12 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
# Re-copy sources from working tree
USER root
COPY . /vt/src/vitess.io/vitess
diff --git a/docker/base/Dockerfile.mariadb b/docker/base/Dockerfile.mariadb
index 70e01e5cefa..7ab85a173bc 100644
--- a/docker/base/Dockerfile.mariadb
+++ b/docker/base/Dockerfile.mariadb
@@ -1,4 +1,4 @@
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mariadb"
FROM "${image}"
@@ -9,6 +9,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
+# Allows private repo go dependencies
+ARG GOPRIVATE
+ARG GH_ACCESS_TOKEN
+
# Re-copy sources from working tree
USER root
COPY . /vt/src/vitess.io/vitess
diff --git a/docker/base/Dockerfile.mariadb103 b/docker/base/Dockerfile.mariadb103
index 53d8789b563..24978c0c5d9 100644
--- a/docker/base/Dockerfile.mariadb103
+++ b/docker/base/Dockerfile.mariadb103
@@ -1,4 +1,4 @@
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103"
FROM "${image}"
@@ -9,6 +9,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
+# Allows private repo go dependencies
+ARG GOPRIVATE
+ARG GH_ACCESS_TOKEN
+
# Re-copy sources from working tree
USER root
COPY . /vt/src/vitess.io/vitess
diff --git a/docker/base/Dockerfile.mysql80 b/docker/base/Dockerfile.mysql80
index 2689dd9772e..d490d091aa8 100644
--- a/docker/base/Dockerfile.mysql80
+++ b/docker/base/Dockerfile.mysql80
@@ -1,4 +1,4 @@
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}"
@@ -9,6 +9,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
+# Allows private repo go dependencies
+ARG GOPRIVATE
+ARG GH_ACCESS_TOKEN
+
# Re-copy sources from working tree
USER root
COPY . /vt/src/vitess.io/vitess
diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57
index cd32498f91f..67db7f15394 100644
--- a/docker/base/Dockerfile.percona57
+++ b/docker/base/Dockerfile.percona57
@@ -1,4 +1,18 @@
-ARG bootstrap_version=11
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}"
@@ -9,6 +23,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
+# Allows private repo go dependencies
+ARG GOPRIVATE
+ARG GH_ACCESS_TOKEN
+
# Re-copy sources from working tree
USER root
COPY . /vt/src/vitess.io/vitess
diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80
index 8a140daed45..3c8fcd8bcfa 100644
--- a/docker/base/Dockerfile.percona80
+++ b/docker/base/Dockerfile.percona80
@@ -1,4 +1,18 @@
-ARG bootstrap_version=11
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}"
@@ -9,6 +23,16 @@ ARG CGO_ENABLED=0
# Allows docker builds to set the BUILD_NUMBER
ARG BUILD_NUMBER
+# Allows docker builds to set the BUILD_GIT_BRANCH
+ARG BUILD_GIT_BRANCH
+
+# Allows docker builds to set the BUILD_GIT_REV
+ARG BUILD_GIT_REV
+
+# Allows private repo go dependencies
+ARG GOPRIVATE
+ARG GH_ACCESS_TOKEN
+
# Re-copy sources from working tree
USER root
COPY . /vt/src/vitess.io/vitess
diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md
index 7604052f33e..05a4f42e4b4 100644
--- a/docker/bootstrap/CHANGELOG.md
+++ b/docker/bootstrap/CHANGELOG.md
@@ -44,4 +44,12 @@ List of changes between bootstrap image versions.
## [11] - 2022-08-31
### Changes
-- Update build to golang 1.18.5
\ No newline at end of file
+- Update build to golang 1.18.5
+
+## [12] - 2022-10-14
+### Changes
+- Update build to golang 1.18.7
+
+## [13] - 2022-12-08
+### Changes
+- Update build to golang 1.18.9
\ No newline at end of file
diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common
index d3f9af78771..f518e994b8e 100644
--- a/docker/bootstrap/Dockerfile.common
+++ b/docker/bootstrap/Dockerfile.common
@@ -1,4 +1,4 @@
-FROM --platform=linux/amd64 golang:1.18.5-buster
+FROM --platform=linux/amd64 golang:1.21.10-bullseye
# Install Vitess build dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
diff --git a/docker/bootstrap/Dockerfile.mysql57 b/docker/bootstrap/Dockerfile.mysql57
index 4e9b335ddac..4d79be9d3ec 100644
--- a/docker/bootstrap/Dockerfile.mysql57
+++ b/docker/bootstrap/Dockerfile.mysql57
@@ -5,7 +5,7 @@ FROM --platform=linux/amd64 "${image}"
# Install MySQL 5.7
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates && \
- for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \
+ for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com A8D3785C && break; done && \
add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' && \
for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \
diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80
index e064c638d99..9310d329d51 100644
--- a/docker/bootstrap/Dockerfile.mysql80
+++ b/docker/bootstrap/Dockerfile.mysql80
@@ -5,8 +5,8 @@ FROM --platform=linux/amd64 "${image}"
# Install MySQL 8.0
RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \
- for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \
- add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' && \
+ for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com A8D3785C && break; done && \
+ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' && \
for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \
{ \
diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile
index 30ff33952bc..1967aea538e 100644
--- a/docker/k8s/Dockerfile
+++ b/docker/k8s/Dockerfile
@@ -51,6 +51,7 @@ COPY --from=base /vt/bin/vtgate /vt/bin/
COPY --from=base /vt/bin/vttablet /vt/bin/
COPY --from=base /vt/bin/vtbackup /vt/bin/
COPY --from=base /vt/bin/vtadmin /vt/bin/
+COPY --from=base /vt/bin/vtorc /vt/bin/
# copy web admin files
COPY --from=base $VTROOT/web /vt/web/
diff --git a/docker/k8s/logrotate/Dockerfile b/docker/k8s/logrotate/Dockerfile
index 4f109ce3c68..a6054b78a26 100644
--- a/docker/k8s/logrotate/Dockerfile
+++ b/docker/k8s/logrotate/Dockerfile
@@ -16,9 +16,9 @@ ARG DEBIAN_VER=stable-slim
FROM debian:${DEBIAN_VER}
-COPY docker/k8s/logrotate/logrotate.conf /vt/logrotate.conf
+COPY logrotate.conf /vt/logrotate.conf
-COPY docker/k8s/logrotate/rotate.sh /vt/rotate.sh
+COPY rotate.sh /vt/rotate.sh
RUN mkdir -p /vt && \
apt-get update && \
diff --git a/docker/k8s/logtail/Dockerfile b/docker/k8s/logtail/Dockerfile
index a5d1d8340d3..b64fe5b3b6f 100644
--- a/docker/k8s/logtail/Dockerfile
+++ b/docker/k8s/logtail/Dockerfile
@@ -18,7 +18,7 @@ FROM debian:${DEBIAN_VER}
ENV TAIL_FILEPATH /dev/null
-COPY docker/k8s/logtail/tail.sh /vt/tail.sh
+COPY tail.sh /vt/tail.sh
RUN mkdir -p /vt && \
apt-get update && \
diff --git a/docker/k8s/vtadmin/Dockerfile b/docker/k8s/vtadmin/Dockerfile
index fe8c8ad5593..837ac8a525a 100644
--- a/docker/k8s/vtadmin/Dockerfile
+++ b/docker/k8s/vtadmin/Dockerfile
@@ -43,7 +43,7 @@ COPY --from=k8s /vt/bin/vtadmin /vt/bin/
COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --chown=nginx --from=node /vt/web/vtadmin/build /var/www/
-COPY --chown=nginx docker/k8s/vtadmin/default.conf /etc/nginx/templates/default.conf.template
+COPY --chown=nginx default.conf /etc/nginx/templates/default.conf.template
# command to run nginx is in the base image
# https://github.com/nginxinc/docker-nginx-unprivileged/blob/main/stable/alpine/Dockerfile#L150
diff --git a/docker/k8s/vtorc/Dockerfile b/docker/k8s/vtorc/Dockerfile
new file mode 100644
index 00000000000..b62b30ee676
--- /dev/null
+++ b/docker/k8s/vtorc/Dockerfile
@@ -0,0 +1,38 @@
+# Copyright 2019 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG VT_BASE_VER=latest
+ARG DEBIAN_VER=stable-slim
+
+FROM vitess/k8s:${VT_BASE_VER} AS k8s
+
+FROM debian:${DEBIAN_VER}
+
+# Set up Vitess environment (just enough to run pre-built Go binaries)
+ENV VTROOT /vt
+
+# Prepare directory structure.
+RUN mkdir -p /vt/bin && mkdir -p /vtdataroot
+
+# Copy certs to allow https calls
+COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+
+# Copy binaries
+COPY --from=k8s /vt/bin/vtorc /vt/bin/
+
+# add vitess user/group and add permissions
+RUN groupadd -r --gid 2000 vitess && \
+ useradd -r -g vitess --uid 1000 vitess && \
+ chown -R vitess:vitess /vt && \
+ chown -R vitess:vitess /vtdataroot
diff --git a/docker/lite/Dockerfile.alpine b/docker/lite/Dockerfile.alpine
index 46f2b0386d3..967787f33b9 100644
--- a/docker/lite/Dockerfile.alpine
+++ b/docker/lite/Dockerfile.alpine
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mariadb b/docker/lite/Dockerfile.mariadb
index 1eb86a180ab..77adaa972dd 100644
--- a/docker/lite/Dockerfile.mariadb
+++ b/docker/lite/Dockerfile.mariadb
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mariadb"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mariadb103 b/docker/lite/Dockerfile.mariadb103
index e6fe0417dc8..004dce38ab7 100644
--- a/docker/lite/Dockerfile.mariadb103
+++ b/docker/lite/Dockerfile.mariadb103
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57
index 8b0fb1a4043..87f6bf35210 100644
--- a/docker/lite/Dockerfile.mysql57
+++ b/docker/lite/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80
index 6eb37136aae..ec452fa395a 100644
--- a/docker/lite/Dockerfile.mysql80
+++ b/docker/lite/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57
index 498e9a48028..cc49eba0fb4 100644
--- a/docker/lite/Dockerfile.percona57
+++ b/docker/lite/Dockerfile.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80
index f934504579f..04274ec5e7e 100644
--- a/docker/lite/Dockerfile.percona80
+++ b/docker/lite/Dockerfile.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing
index 7a8d4b709a7..037f8b498e3 100644
--- a/docker/lite/Dockerfile.testing
+++ b/docker/lite/Dockerfile.testing
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57
index 51ed4459f7a..b4a8612e137 100644
--- a/docker/lite/Dockerfile.ubi7.mysql57
+++ b/docker/lite/Dockerfile.ubi7.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80
index db6fd40efd1..c5d481e3c46 100644
--- a/docker/lite/Dockerfile.ubi7.mysql80
+++ b/docker/lite/Dockerfile.ubi7.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57
index 997bc401147..a3b08a65674 100644
--- a/docker/lite/Dockerfile.ubi7.percona57
+++ b/docker/lite/Dockerfile.ubi7.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80
index e16522cc3ca..6875d7e2945 100644
--- a/docker/lite/Dockerfile.ubi7.percona80
+++ b/docker/lite/Dockerfile.ubi7.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80
index 6df9a647378..810531a1b07 100644
--- a/docker/lite/Dockerfile.ubi8.arm64.mysql80
+++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -38,13 +38,13 @@ FROM registry.access.redhat.com/ubi8/ubi:latest
# Install keys and dependencies
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
- && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
+ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D A8D3785C A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
# No xtrabackup packages for aarch64 yet, but still keeping this here
&& gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \
&& gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \
&& gpg --export --armor 99DB70FAE1D7CE227FB6488205B555B38483C65D > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 \
&& gpg --export --armor 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 > ${GNUPGHOME}/RPM-GPG-KEY-EPEL-8 \
- && gpg --export --armor 3A79BD29 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 \
+ && gpg --export --armor A8D3785C > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 \
&& gpg --export --armor A4A9406876FCBD3C456770C88C718D3B5072E1F5 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 \
&& rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/mysqlrepo.rpm https://dev.mysql.com/get/mysql80-community-release-el8-1.noarch.rpm \
diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80
index 76766c2fc19..f339dd31ab8 100644
--- a/docker/lite/Dockerfile.ubi8.mysql80
+++ b/docker/lite/Dockerfile.ubi8.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -38,12 +38,12 @@ FROM registry.access.redhat.com/ubi8/ubi:latest
# Install keys and dependencies
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
- && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
+ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D A8D3785C A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
&& gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \
&& gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \
&& gpg --export --armor 99DB70FAE1D7CE227FB6488205B555B38483C65D > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 \
&& gpg --export --armor 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 > ${GNUPGHOME}/RPM-GPG-KEY-EPEL-8 \
- && gpg --export --armor 3A79BD29 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 \
+ && gpg --export --armor A8D3785C > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 \
&& gpg --export --armor A4A9406876FCBD3C456770C88C718D3B5072E1F5 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 \
&& rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release ${GNUPGHOME}/RPM-GPG-KEY-EPEL-8 \
&& curl -L --retry-delay 10 --retry 3 -o /tmp/mysqlrepo.rpm https://dev.mysql.com/get/mysql80-community-release-el8-1.noarch.rpm \
diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh
index fce8f8001b2..70f95bc38aa 100755
--- a/docker/lite/install_dependencies.sh
+++ b/docker/lite/install_dependencies.sh
@@ -142,17 +142,9 @@ mariadb103)
esac
# Get GPG keys for extra apt repositories.
-case "${FLAVOR}" in
-mysql57|mysql80)
- # repo.mysql.com
- add_apt_key 8C718D3B5072E1F5
- add_apt_key 467B942D3A79BD29
- ;;
-mariadb|mariadb103)
- # digitalocean.com
- add_apt_key F1656F24C74CD1D8
- ;;
-esac
+# repo.mysql.com
+add_apt_key 8C718D3B5072E1F5
+add_apt_key A8D3785C
# All flavors (except mariadb*) include Percona XtraBackup (from repo.percona.com).
add_apt_key 9334A25F8507EFA5
diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile
index 77af8fb2b2a..b474863dbb3 100644
--- a/docker/local/Dockerfile
+++ b/docker/local/Dockerfile
@@ -1,4 +1,4 @@
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-common"
FROM "${image}"
diff --git a/docker/release.sh b/docker/release.sh
index 3ca6569387a..e3cb7c07164 100755
--- a/docker/release.sh
+++ b/docker/release.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
-vt_base_version='v13.0.0'
+vt_base_version='v15.0.5'
debian_versions='buster bullseye'
default_debian_version='bullseye'
@@ -21,6 +21,11 @@ do
docker push vitess/vtadmin:$vt_base_version-$debian_version
if [[ $debian_version == $default_debian_version ]]; then docker push vitess/vtadmin:$vt_base_version; fi
+ docker build --platform linux/amd64 --build-arg VT_BASE_VER=$vt_base_version --build-arg DEBIAN_VER=$debian_version-slim -t vitess/vtorc:$vt_base_version-$debian_version k8s/vtorc
+ docker tag vitess/vtorc:$vt_base_version-$debian_version vitess/vtorc:$vt_base_version
+ docker push vitess/vtorc:$vt_base_version-$debian_version
+ if [[ $debian_version == $default_debian_version ]]; then docker push vitess/vtorc:$vt_base_version; fi
+
docker build --platform linux/amd64 --build-arg VT_BASE_VER=$vt_base_version --build-arg DEBIAN_VER=$debian_version-slim -t vitess/vtgate:$vt_base_version-$debian_version k8s/vtgate
docker tag vitess/vtgate:$vt_base_version-$debian_version vitess/vtgate:$vt_base_version
docker push vitess/vtgate:$vt_base_version-$debian_version
diff --git a/docker/test/run.sh b/docker/test/run.sh
index e41a529c51d..870cdce425f 100755
--- a/docker/test/run.sh
+++ b/docker/test/run.sh
@@ -181,6 +181,10 @@ bashcmd=$(append_cmd "$bashcmd" "rm -rf /vt/bin; ln -s /vt/src/vitess.io/vitess/
bashcmd=$(append_cmd "$bashcmd" "rm -rf /vt/lib; ln -s /vt/src/vitess.io/vitess/lib /vt/lib")
bashcmd=$(append_cmd "$bashcmd" "rm -rf /vt/vthook; ln -s /vt/src/vitess.io/vitess/vthook /vt/vthook")
+# Setup git for private go modules
+bashcmd=$(append_cmd "$bashcmd" "export GOPRIVATE=$GOPRIVATE")
+bashcmd=$(append_cmd "$bashcmd" "git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/")
+
# Maven was setup in /vt/dist, may need to reinstall it.
bashcmd=$(append_cmd "$bashcmd" "echo 'Checking if mvn needs installing...'; if [[ ! \$(command -v mvn) ]]; then echo 'install maven'; curl -sL --connect-timeout 10 --retry 3 http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz | tar -xz && mv apache-maven-3.3.9 /vt/dist/maven; fi; echo 'mvn check done'")
diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57
index bc93a4b8e0a..f109c7b0c7d 100644
--- a/docker/vttestserver/Dockerfile.mysql57
+++ b/docker/vttestserver/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80
index 158187f4a1c..f6b088e8e92 100644
--- a/docker/vttestserver/Dockerfile.mysql80
+++ b/docker/vttestserver/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml
index d982f36c331..c48732b71ac 100644
--- a/examples/compose/docker-compose.beginners.yml
+++ b/examples/compose/docker-compose.beginners.yml
@@ -1,7 +1,7 @@
version: "2.1"
services:
consul1:
- image: consul:latest
+ image: hashicorp/consul:latest
hostname: "consul1"
ports:
- "8400:8400"
@@ -9,7 +9,7 @@ services:
- "8600:8600"
command: "agent -server -bootstrap-expect 3 -ui -disable-host-node-id -client 0.0.0.0"
consul2:
- image: consul:latest
+ image: hashicorp/consul:latest
hostname: "consul2"
expose:
- "8400"
@@ -19,7 +19,7 @@ services:
depends_on:
- consul1
consul3:
- image: consul:latest
+ image: hashicorp/consul:latest
hostname: "consul3"
expose:
- "8400"
@@ -58,7 +58,7 @@ services:
- "3306"
vtctld:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15000:$WEB_PORT"
- "$GRPC_PORT"
@@ -83,7 +83,7 @@ services:
condition: service_healthy
vtgate:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15099:$WEB_PORT"
- "$GRPC_PORT"
@@ -113,7 +113,7 @@ services:
condition: service_healthy
schemaload:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
command:
- sh
- -c
@@ -146,12 +146,12 @@ services:
environment:
- KEYSPACES=$KEYSPACE
- GRPC_PORT=15999
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
vttablet100:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15100:$WEB_PORT"
- "$GRPC_PORT"
@@ -183,7 +183,7 @@ services:
retries: 15
vttablet101:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15101:$WEB_PORT"
- "$GRPC_PORT"
@@ -215,7 +215,7 @@ services:
retries: 15
vttablet102:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15102:$WEB_PORT"
- "$GRPC_PORT"
@@ -247,7 +247,7 @@ services:
retries: 15
vttablet103:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15103:$WEB_PORT"
- "$GRPC_PORT"
@@ -279,7 +279,7 @@ services:
retries: 15
vtorc:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
command: ["sh", "-c", "/script/vtorc-up.sh"]
depends_on:
- vtctld
@@ -309,7 +309,7 @@ services:
retries: 15
vreplication:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml
index c6de4af3ce7..0964d87f69f 100644
--- a/examples/compose/docker-compose.yml
+++ b/examples/compose/docker-compose.yml
@@ -2,7 +2,7 @@ services:
consul1:
command: agent -server -bootstrap-expect 3 -ui -disable-host-node-id -client 0.0.0.0
hostname: consul1
- image: consul:latest
+ image: hashicorp/consul:latest
ports:
- 8400:8400
- 8500:8500
@@ -16,7 +16,7 @@ services:
- "8500"
- "8600"
hostname: consul2
- image: consul:latest
+ image: hashicorp/consul:latest
consul3:
command: agent -server -retry-join consul1 -disable-host-node-id
depends_on:
@@ -26,7 +26,7 @@ services:
- "8500"
- "8600"
hostname: consul3
- image: consul:latest
+ image: hashicorp/consul:latest
external_db_host:
build:
context: ./external_db/mysql
@@ -75,7 +75,7 @@ services:
- SCHEMA_FILES=lookup_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
schemaload_test_keyspace:
@@ -101,7 +101,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
set_keyspace_durability_policy:
@@ -115,7 +115,7 @@ services:
environment:
- KEYSPACES=test_keyspace lookup_keyspace
- GRPC_PORT=15999
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
vreplication:
@@ -129,7 +129,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
vtctld:
@@ -143,7 +143,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15000:8080
- "15999"
@@ -160,7 +160,7 @@ services:
--normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15099:8080
- "15999"
@@ -182,7 +182,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 13000:8080
volumes:
@@ -217,7 +217,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15101:8080
- "15999"
@@ -254,7 +254,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15102:8080
- "15999"
@@ -291,7 +291,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15201:8080
- "15999"
@@ -328,7 +328,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15202:8080
- "15999"
@@ -365,7 +365,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15301:8080
- "15999"
@@ -402,7 +402,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15302:8080
- "15999"
diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml
index 69365d4fb46..220b58ea455 100644
--- a/examples/compose/vtcompose/docker-compose.test.yml
+++ b/examples/compose/vtcompose/docker-compose.test.yml
@@ -79,7 +79,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
schemaload_unsharded_keyspace:
@@ -103,7 +103,7 @@ services:
- SCHEMA_FILES=unsharded_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
set_keyspace_durability_policy_test_keyspace:
@@ -117,7 +117,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=test_keyspace
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
set_keyspace_durability_policy_unsharded_keyspace:
@@ -130,7 +130,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=unsharded_keyspace
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
vreplication:
@@ -144,7 +144,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- .:/script
vtctld:
@@ -159,7 +159,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15000:8080
- "15999"
@@ -176,7 +176,7 @@ services:
''grpc-vtgateservice'' --normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15099:8080
- "15999"
@@ -199,7 +199,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 13000:8080
volumes:
@@ -234,7 +234,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15101:8080
- "15999"
@@ -271,7 +271,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15102:8080
- "15999"
@@ -308,7 +308,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15201:8080
- "15999"
@@ -345,7 +345,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15202:8080
- "15999"
@@ -382,7 +382,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- 15301:8080
- "15999"
diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go
index 37d9f21c191..db95a74e98c 100644
--- a/examples/compose/vtcompose/vtcompose.go
+++ b/examples/compose/vtcompose/vtcompose.go
@@ -533,7 +533,7 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo,
- op: add
path: /services/init_shard_primary%[2]d
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
command: ["sh", "-c", "/vt/bin/vtctlclient %[5]s InitShardPrimary -force %[4]s/%[3]s %[6]s-%[2]d "]
%[1]s
`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell)
@@ -565,7 +565,7 @@ func generateExternalPrimary(
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15%[1]d:%[3]d"
- "%[4]d"
@@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15%[1]d:%[4]d"
- "%[5]d"
@@ -665,7 +665,7 @@ func generateVtctld(opts vtOptions) string {
- op: add
path: /services/vtctld
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15000:%[1]d"
- "%[2]d"
@@ -698,7 +698,7 @@ func generateVtgate(opts vtOptions) string {
- op: add
path: /services/vtgate
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
ports:
- "15099:%[1]d"
- "%[2]d"
@@ -740,7 +740,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf
- op: add
path: /services/vtorc
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- ".:/script"
environment:
@@ -765,7 +765,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string {
- op: add
path: /services/vreplication
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- ".:/script"
environment:
@@ -793,7 +793,7 @@ func generateSetKeyspaceDurabilityPolicy(
- op: add
path: /services/set_keyspace_durability_policy_%[3]s
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- ".:/script"
environment:
@@ -830,7 +830,7 @@ func generateSchemaload(
- op: add
path: /services/schemaload_%[7]s
value:
- image: vitess/lite:${VITESS_TAG:-latest}
+ image: vitess/lite:v15.0.5
volumes:
- ".:/script"
environment:
diff --git a/examples/local/scripts/vtgate-up.sh b/examples/local/scripts/vtgate-up.sh
index cb33e27839b..49671444f55 100755
--- a/examples/local/scripts/vtgate-up.sh
+++ b/examples/local/scripts/vtgate-up.sh
@@ -39,6 +39,7 @@ vtgate \
--tablet_types_to_wait PRIMARY,REPLICA \
--service_map 'grpc-vtgateservice' \
--pid_file $VTDATAROOT/tmp/vtgate.pid \
+ --enable_buffer \
--mysql_auth_server_impl none \
> $VTDATAROOT/tmp/vtgate.out 2>&1 &
diff --git a/examples/local/vtadmin/discovery.json b/examples/local/vtadmin/discovery.json
index def7dd50f85..92e20fb40d8 100644
--- a/examples/local/vtadmin/discovery.json
+++ b/examples/local/vtadmin/discovery.json
@@ -10,6 +10,7 @@
"vtgates": [
{
"host": {
+ "fqdn": "localhost:15001",
"hostname": "localhost:15991"
}
}
diff --git a/examples/local/vtadmin/rbac.yaml b/examples/local/vtadmin/rbac.yaml
index 1b46933ba39..a2e665e4d8d 100644
--- a/examples/local/vtadmin/rbac.yaml
+++ b/examples/local/vtadmin/rbac.yaml
@@ -1,17 +1,5 @@
rules:
- resource: "*"
- actions:
- - "get"
- - "create"
- - "delete"
- - "put"
- - "ping"
+ actions: ["*"]
subjects: ["*"]
clusters: ["*"]
- - resource: "Shard"
- actions:
- - "emergency_reparent_shard"
- - "planned_reparent_shard"
- subjects: ["*"]
- clusters:
- - "local"
diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml
index 81efad081ba..83b90c8c5d8 100644
--- a/examples/operator/101_initial_cluster.yaml
+++ b/examples/operator/101_initial_cluster.yaml
@@ -8,14 +8,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v15.0.5
+ vtadmin: vitess/vtadmin:v15.0.5
+ vtgate: vitess/lite:v15.0.5
+ vttablet: vitess/lite:v15.0.5
+ vtbackup: vitess/lite:v15.0.5
+ vtorc: vitess/lite:v15.0.5
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql56Compatible: vitess/lite:v15.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -72,15 +72,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
@@ -237,17 +236,7 @@ stringData:
clusters: ["*"]
- resource: "Shard"
actions:
- - "emergency_reparent_shard"
- - "planned_reparent_shard"
+ - "emergency_failover_shard"
+ - "planned_failover_shard"
subjects: ["*"]
- clusters:
- - "local"
- orc_config.json: |
- {
- "Debug": true,
- "MySQLTopologyUser": "orc_client_user",
- "MySQLTopologyPassword": "orc_client_user_password",
- "MySQLReplicaUser": "vt_repl",
- "MySQLReplicaPassword": "",
- "RecoveryPeriodBlockSeconds": 5
- }
+ clusters: ["*"]
diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml
index 52f110678a2..ad11b3f498d 100644
--- a/examples/operator/201_customer_tablets.yaml
+++ b/examples/operator/201_customer_tablets.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v15.0.5
+ vtadmin: vitess/vtadmin:v15.0.5
+ vtgate: vitess/lite:v15.0.5
+ vttablet: vitess/lite:v15.0.5
+ vtbackup: vitess/lite:v15.0.5
+ vtorc: vitess/lite:v15.0.5
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql56Compatible: vitess/lite:v15.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -68,15 +68,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml
index 44a7aeea8ef..c8b890528d6 100644
--- a/examples/operator/302_new_shards.yaml
+++ b/examples/operator/302_new_shards.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v15.0.5
+ vtadmin: vitess/vtadmin:v15.0.5
+ vtgate: vitess/lite:v15.0.5
+ vttablet: vitess/lite:v15.0.5
+ vtbackup: vitess/lite:v15.0.5
+ vtorc: vitess/lite:v15.0.5
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql56Compatible: vitess/lite:v15.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -68,15 +68,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml
index a80e57c2f2a..41403450dde 100644
--- a/examples/operator/306_down_shard_0.yaml
+++ b/examples/operator/306_down_shard_0.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:latest
- vtadmin: vitess/vtadmin:latest
- vtgate: vitess/lite:latest
- vttablet: vitess/lite:latest
- vtbackup: vitess/lite:latest
- vtorc: vitess/lite:latest
+ vtctld: vitess/lite:v15.0.5
+ vtadmin: vitess/vtadmin:v15.0.5
+ vtgate: vitess/lite:v15.0.5
+ vttablet: vitess/lite:v15.0.5
+ vtbackup: vitess/lite:v15.0.5
+ vtorc: vitess/lite:v15.0.5
mysqld:
- mysql56Compatible: vitess/lite:latest
+ mysql56Compatible: vitess/lite:v15.0.5
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
@@ -68,15 +68,14 @@ spec:
durabilityPolicy: none
turndownPolicy: Immediate
vitessOrchestrator:
- configSecret:
- name: example-cluster-config
- key: orc_config.json
resources:
limits:
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
+ extraFlags:
+ recovery-period-block-duration: 5s
partitionings:
- equal:
parts: 1
diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml
index aa4ce0ef75c..731bfb4d185 100644
--- a/examples/operator/operator.yaml
+++ b/examples/operator/operator.yaml
@@ -2450,6 +2450,7 @@ spec:
properties:
parts:
format: int32
+ maximum: 65536
minimum: 1
type: integer
shardTemplate:
@@ -2801,17 +2802,6 @@ spec:
additionalProperties:
type: string
type: object
- configSecret:
- properties:
- key:
- type: string
- name:
- type: string
- volumeName:
- type: string
- required:
- - key
- type: object
extraEnv:
items:
properties:
@@ -2936,8 +2926,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
tolerations:
x-kubernetes-preserve-unknown-fields: true
- required:
- - configSecret
type: object
required:
- name
@@ -4002,6 +3990,7 @@ spec:
properties:
parts:
format: int32
+ maximum: 65536
minimum: 1
type: integer
shardTemplate:
@@ -4387,17 +4376,6 @@ spec:
additionalProperties:
type: string
type: object
- configSecret:
- properties:
- key:
- type: string
- name:
- type: string
- volumeName:
- type: string
- required:
- - key
- type: object
extraEnv:
items:
properties:
@@ -4522,8 +4500,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
tolerations:
x-kubernetes-preserve-unknown-fields: true
- required:
- - configSecret
type: object
zoneMap:
additionalProperties:
@@ -5258,17 +5234,6 @@ spec:
additionalProperties:
type: string
type: object
- configSecret:
- properties:
- key:
- type: string
- name:
- type: string
- volumeName:
- type: string
- required:
- - key
- type: object
extraEnv:
items:
properties:
@@ -5393,8 +5358,6 @@ spec:
x-kubernetes-preserve-unknown-fields: true
tolerations:
x-kubernetes-preserve-unknown-fields: true
- required:
- - configSecret
type: object
zoneMap:
additionalProperties:
@@ -5652,11 +5615,11 @@ spec:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: vitess-operator
- image: planetscale/vitess-operator:latest
+ image: planetscale/vitess-operator:v2.8.6
name: vitess-operator
resources:
limits:
- memory: 128Mi
+ memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
diff --git a/examples/region_sharding/vtadmin/discovery.json b/examples/region_sharding/vtadmin/discovery.json
index def7dd50f85..92e20fb40d8 100644
--- a/examples/region_sharding/vtadmin/discovery.json
+++ b/examples/region_sharding/vtadmin/discovery.json
@@ -10,6 +10,7 @@
"vtgates": [
{
"host": {
+ "fqdn": "localhost:15001",
"hostname": "localhost:15991"
}
}
diff --git a/examples/region_sharding/vtadmin/rbac.yaml b/examples/region_sharding/vtadmin/rbac.yaml
index 1b46933ba39..a2e665e4d8d 100644
--- a/examples/region_sharding/vtadmin/rbac.yaml
+++ b/examples/region_sharding/vtadmin/rbac.yaml
@@ -1,17 +1,5 @@
rules:
- resource: "*"
- actions:
- - "get"
- - "create"
- - "delete"
- - "put"
- - "ping"
+ actions: ["*"]
subjects: ["*"]
clusters: ["*"]
- - resource: "Shard"
- actions:
- - "emergency_reparent_shard"
- - "planned_reparent_shard"
- subjects: ["*"]
- clusters:
- - "local"
diff --git a/go.mod b/go.mod
index 3651a42d026..16fda71de30 100644
--- a/go.mod
+++ b/go.mod
@@ -1,61 +1,58 @@
module vitess.io/vitess
-go 1.18
+go 1.21
require (
cloud.google.com/go/storage v1.10.0
- github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c
- github.com/Azure/azure-pipeline-go v0.2.2
- github.com/Azure/azure-storage-blob-go v0.10.0
- github.com/DataDog/datadog-go v2.2.0+incompatible
+ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
+ github.com/Azure/azure-pipeline-go v0.2.3
+ github.com/Azure/azure-storage-blob-go v0.15.0
+ github.com/DataDog/datadog-go v4.8.3+incompatible
github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect
github.com/PuerkitoBio/goquery v1.5.1
github.com/aquarapid/vaultlib v0.5.1
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
- github.com/aws/aws-sdk-go v1.34.2
- github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13
- github.com/cespare/xxhash/v2 v2.1.1
+ github.com/aws/aws-sdk-go v1.53.11
+ github.com/buger/jsonparser v1.1.1
+ github.com/cespare/xxhash/v2 v2.2.0
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
github.com/corpix/uarand v0.1.1 // indirect
- github.com/dave/jennifer v1.4.1
+ github.com/dave/jennifer v1.7.0
github.com/evanphx/json-patch v4.9.0+incompatible
- github.com/fsnotify/fsnotify v1.4.9
+ github.com/fsnotify/fsnotify v1.6.0
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab
- github.com/go-sql-driver/mysql v1.6.0
- github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
- github.com/golang/mock v1.5.0
+ github.com/go-sql-driver/mysql v1.7.0
+ github.com/golang/glog v1.1.2
+ github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
- github.com/golang/snappy v0.0.3
- github.com/google/go-cmp v0.5.8
+ github.com/golang/snappy v0.0.4
+ github.com/google/go-cmp v0.6.0
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
- github.com/google/uuid v1.3.0
+ github.com/google/uuid v1.3.1
github.com/googleapis/gnostic v0.4.1 // indirect
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
- github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
+ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/hashicorp/consul/api v1.10.1
+ github.com/hashicorp/consul/api v1.18.0
github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
- github.com/hashicorp/go-sockaddr v1.0.2 // indirect
- github.com/hashicorp/go-uuid v1.0.2 // indirect
- github.com/hashicorp/serf v0.9.7 // indirect
+ github.com/hashicorp/serf v0.10.1 // indirect
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428
github.com/imdario/mergo v0.3.12 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/klauspost/compress v1.13.0
- github.com/klauspost/pgzip v1.2.4
+ github.com/klauspost/compress v1.15.15
+ github.com/klauspost/pgzip v1.2.5
github.com/krishicks/yaml-patch v0.0.10
- github.com/magiconair/properties v1.8.5
- github.com/mattn/go-sqlite3 v1.14.14
+ github.com/magiconair/properties v1.8.7
+ github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1
- github.com/mitchellh/go-testing-interface v1.14.0 // indirect
- github.com/montanaflynn/stats v0.6.3
+ github.com/montanaflynn/stats v0.7.0
github.com/olekukonko/tablewriter v0.0.5-0.20200416053754-163badb3bac6
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02
- github.com/opentracing/opentracing-go v1.1.0
+ github.com/opentracing/opentracing-go v1.2.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/philhofer/fwd v1.0.0 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible
@@ -63,17 +60,17 @@ require (
github.com/pkg/errors v0.9.1
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a
github.com/planetscale/vtprotobuf v0.3.0
- github.com/prometheus/client_golang v1.11.0
- github.com/prometheus/common v0.29.0 // indirect
+ github.com/prometheus/client_golang v1.12.0
+ github.com/prometheus/common v0.32.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1
- github.com/soheilhy/cmux v0.1.4
- github.com/spf13/cobra v1.4.0
+ github.com/soheilhy/cmux v0.1.5
+ github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.8.1
- github.com/spyzhov/ajson v0.4.2
- github.com/stretchr/testify v1.7.1
- github.com/tchap/go-patricia v2.2.6+incompatible
+ github.com/spf13/viper v1.9.0
+ github.com/spyzhov/ajson v0.7.2
+ github.com/stretchr/testify v1.8.1
+ github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tebeka/selenium v0.9.9
github.com/tidwall/gjson v1.12.1
github.com/tinylib/msgp v1.1.1 // indirect
@@ -85,18 +82,18 @@ require (
go.etcd.io/etcd/api/v3 v3.5.0
go.etcd.io/etcd/client/pkg/v3 v3.5.0
go.etcd.io/etcd/client/v3 v3.5.0
- golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 // indirect
+ golang.org/x/crypto v0.14.0 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
- golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
- golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
- golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
- golang.org/x/text v0.3.7
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/net v0.17.0
+ golang.org/x/oauth2 v0.20.0
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/term v0.20.0
+ golang.org/x/text v0.13.0
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
- golang.org/x/tools v0.1.10
- google.golang.org/api v0.45.0
- google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 // indirect
+ golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846
+ google.golang.org/api v0.56.0
+ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 // indirect
google.golang.org/grpc v1.45.0
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b
@@ -117,20 +114,32 @@ require (
require (
github.com/bndr/gotabulate v1.1.2
- github.com/openark/golib v0.0.0-20210531070646-355f37940af8
+ github.com/hashicorp/go-cleanhttp v0.5.1
+ github.com/hashicorp/go-version v1.6.0
+ github.com/planetscale/log v0.0.0-20221118170849-fb599bc35c50
+ github.com/slackhq/vitess-addons v0.15.3
+ github.com/slok/noglog v0.2.0
+ go.uber.org/mock v0.4.0
+ go.uber.org/zap v1.23.0
+ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
+ golang.org/x/sync v0.3.0
+ golang.org/x/tools/cmd/cover v0.1.0-deprecated
+ modernc.org/sqlite v1.20.3
)
require (
- cloud.google.com/go v0.81.0 // indirect
+ cloud.google.com/go v0.93.3 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/andybalholm/cascadia v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fatih/color v1.9.0 // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/frankban/quicktest v1.14.3 // indirect
@@ -138,34 +147,33 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/google/gofuzz v1.1.0 // indirect
- github.com/googleapis/gax-go/v2 v2.0.5 // indirect
- github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+ github.com/googleapis/gax-go/v2 v2.1.0 // indirect
github.com/hashicorp/go-hclog v0.12.0 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
- github.com/hashicorp/golang-lru v0.5.1 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/json-iterator/go v1.1.11 // indirect
- github.com/jstemmer/go-junit-report v0.9.1 // indirect
+ github.com/inconshreveable/mousetrap v1.0.1 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
- github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d // indirect
- github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/mattn/go-ieproxy v0.0.1 // indirect
+ github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-runewidth v0.0.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/mitchellh/mapstructure v1.4.1 // indirect
+ github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/onsi/ginkgo v1.12.1 // indirect
github.com/onsi/gomega v1.10.3 // indirect
- github.com/pelletier/go-toml v1.9.3 // indirect
+ github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/procfs v0.6.0 // indirect
+ github.com/prometheus/procfs v0.7.3 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/satori/go.uuid v1.2.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
- github.com/spf13/cast v1.3.1 // indirect
+ github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
@@ -173,18 +181,28 @@ require (
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
- go.uber.org/zap v1.17.0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/ini.v1 v1.62.0 // indirect
+ gopkg.in/ini.v1 v1.63.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.20.6 // indirect
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac // indirect
k8s.io/klog v1.0.0 // indirect
k8s.io/klog/v2 v2.4.0 // indirect
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd // indirect
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
+ lukechampine.com/uint128 v1.2.0 // indirect
+ modernc.org/cc/v3 v3.40.0 // indirect
+ modernc.org/ccgo/v3 v3.16.13 // indirect
+ modernc.org/libc v1.22.2 // indirect
+ modernc.org/mathutil v1.5.0 // indirect
+ modernc.org/memory v1.5.0 // indirect
+ modernc.org/opt v0.1.3 // indirect
+ modernc.org/strutil v1.1.3 // indirect
+ modernc.org/token v1.0.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
)
+
+replace github.com/google/glog => github.com/planetscale/noglog v0.2.1-0.20210421230640-bea75fcd2e8e
diff --git a/go.sum b/go.sum
index 19fe3502a7c..8b10b8caeb0 100644
--- a/go.sum
+++ b/go.sum
@@ -18,8 +18,13 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -28,7 +33,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -40,12 +45,12 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c h1:9K6I0yCgGSneuHCoIlJl0O09UjqqWduCwd+ZL1nHFWc=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c/go.mod h1:WpB7kf89yJUETZxQnP1kgYPNwlT2jjdDYUCoxVggM3g=
-github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs=
-github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
+github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -53,21 +58,21 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B
github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -77,14 +82,18 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOC
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e h1:4ZrkT/RzpnROylmoQL57iVUL57wGKTR5O6KpVnbm2tA=
github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k=
-github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q=
+github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg=
github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4=
github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -115,24 +124,28 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.34.2 h1:9vCknCdTAmmV4ht7lPuda7aJXzllXwEQyCMZKJHjBrM=
-github.com/aws/aws-sdk-go v1.34.2/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.53.11 h1:KcmduYvX15rRqt4ZU/7jKkmDxU/G87LJ9MUI0yQJh00=
+github.com/aws/aws-sdk-go v1.53.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bndr/gotabulate v1.1.2 h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c=
github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U=
-github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13 h1:+qUNY4VRkEH46bLUwxCyUU+iOGJMQBVibAaYzWiwWcg=
-github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -143,6 +156,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -165,14 +179,14 @@ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfc
github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U=
github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw=
-github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
+github.com/dave/jennifer v1.7.0 h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE=
+github.com/dave/jennifer v1.7.0/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -183,8 +197,8 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dvyukov/go-fuzz v0.0.0-20210914135545-4980593459a1/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -194,6 +208,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
@@ -208,8 +223,10 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@@ -271,17 +288,17 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -294,8 +311,9 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -315,8 +333,9 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -331,9 +350,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -342,8 +362,9 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -355,24 +376,29 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
@@ -384,18 +410,18 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
-github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
+github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
+github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
+github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -413,7 +439,6 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -424,24 +449,24 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
-github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c h1:aY2hhxLhjEAbfXOx2nRJxCXezC6CO2V/yN+OCr1srtk=
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -452,9 +477,9 @@ github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGzny
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -465,29 +490,30 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs=
-github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
+github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -498,8 +524,9 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E=
github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -510,20 +537,21 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
-github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -535,27 +563,24 @@ github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2w
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI=
-github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/montanaflynn/stats v0.6.3 h1:F8446DrvIF5V5smZfZ8K9nrmmix0AFgevPdLruGOmzk=
-github.com/montanaflynn/stats v0.6.3/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU=
+github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -577,12 +602,11 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/openark/golib v0.0.0-20210531070646-355f37940af8 h1:9ciIHNuyFqRWi9NpMNw9sVLB6z1ItpP5ZhTY9Q1xVu4=
-github.com/openark/golib v0.0.0-20210531070646-355f37940af8/go.mod h1:1jj8x1eDVZxgc/Z4VyamX4qTbAdHPUQA6NeVtCd8Sl8=
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q=
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
-github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -590,8 +614,8 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@@ -604,6 +628,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/planetscale/log v0.0.0-20221118170849-fb599bc35c50 h1:yf0iVpE57riOj2+cLr6Fy6qX6eCLc5RLK3Kc70apP6c=
+github.com/planetscale/log v0.0.0-20221118170849-fb599bc35c50/go.mod h1:SE8Q9QtLD8tfq8bM7rGLJnnWfmxt6mTXGkfGbft1vJI=
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA=
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE=
github.com/planetscale/vtprotobuf v0.3.0 h1:oMrOdDFHS1ADc0dHtC2EApxiM5xd0cQkZeibm0WgXiQ=
@@ -617,8 +643,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
+github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -628,16 +655,19 @@ github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7q
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=
-github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -648,8 +678,7 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -658,25 +687,27 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1 h1:acClJNSOjUrAUKW+ZneCZymCFDWtSaJG5YQl8FoOlyI=
github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1/go.mod h1:Pgf1sZ2KrHK8vdRTV5UHGp80LT7HMUKuNAiKC402abY=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
+github.com/slackhq/vitess-addons v0.15.3 h1:W3YuaXb5h+gfu0Ougc0um13HKtiIkPzJ6W+J0+X6wtY=
+github.com/slackhq/vitess-addons v0.15.3/go.mod h1:+5kNtWka7+mWQ+/kNf6Ci+9tyZNelIkU/zgqoBYUNi8=
+github.com/slok/noglog v0.2.0 h1:1czu4l2EoJ8L92UwdSXXa1Y+c5TIjFAFm2P+mjej95E=
+github.com/slok/noglog v0.2.0/go.mod h1:TfKxwpEZPT+UA83bQ6RME146k0MM4e8mwHLf6bhcGDI=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
+github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -686,26 +717,30 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spyzhov/ajson v0.4.2 h1:JMByd/jZApPKDvNsmO90X2WWGbmT2ahDFp73QhZbg3s=
-github.com/spyzhov/ajson v0.4.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA=
+github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
+github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
+github.com/spyzhov/ajson v0.7.2 h1:kyl+ovUoId/RSBbSbCm31xyQvPixA6Sxgvb0eWyt1Ko=
+github.com/spyzhov/ajson v0.7.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
+github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tebeka/selenium v0.9.9 h1:cNziB+etNgyH/7KlNI7RMC1ua5aH1+5wUlFQyzeMh+w=
github.com/tebeka/selenium v0.9.9/go.mod h1:5Fr8+pUvU6B1OiPfkdCKdXZyr5znvVkxuPd0NOdZCQc=
github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo=
@@ -764,12 +799,17 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
+go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -784,11 +824,12 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8=
-golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -799,6 +840,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -825,8 +868,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -853,6 +896,7 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -871,15 +915,19 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -891,11 +939,12 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
+golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -907,6 +956,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -938,6 +989,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -974,15 +1026,26 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -992,8 +1055,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1010,7 +1073,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -1024,7 +1086,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1058,10 +1119,15 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools/cmd/cover v0.1.0-deprecated h1:Rwy+mWYz6loAF+LnG1jHG/JWMHRMMC2/1XX3Ejkx9lA=
+golang.org/x/tools/cmd/cover v0.1.0-deprecated/go.mod h1:hMDiIvlpN1NoVgmjLjUJE9tMHyxHjFX7RuQ+rW12mSA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1088,9 +1154,13 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
-google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww=
-google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1121,6 +1191,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1141,10 +1212,19 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 h1:HBPuvo39L0DgfVn9eHR3ki/RjZoUFWa+em77e7KFDfs=
-google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1166,7 +1246,11 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=
@@ -1195,8 +1279,9 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzyc
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1205,8 +1290,8 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c=
+gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ldap.v2 v2.5.0 h1:1rO3ojzsHUk+gq4ZYhC4Pg+EzWaaKIV8+DJwExS5/QQ=
gopkg.in/ldap.v2 v2.5.0/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@@ -1227,8 +1312,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1271,6 +1357,34 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAG
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.20.3 h1:SqGJMMxjj1PHusLxdYxeQSodg7Jxn9WWkaAQjKrntZs=
+modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A=
+modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34=
+modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE=
+modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
+modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/go/bucketpool/bucketpool_test.go b/go/bucketpool/bucketpool_test.go
index 3d74c3f94ce..0f54d2ea3a6 100644
--- a/go/bucketpool/bucketpool_test.go
+++ b/go/bucketpool/bucketpool_test.go
@@ -167,6 +167,7 @@ func TestPoolWeirdMaxSize(t *testing.T) {
}
func TestFuzz(t *testing.T) {
+ t.Skip()
maxTestSize := 16384
for i := 0; i < 20000; i++ {
minSize := rand.Intn(maxTestSize)
diff --git a/go/cmd/internal/docgen/docgen.go b/go/cmd/internal/docgen/docgen.go
index 6fe461e5af7..16b237ea15f 100644
--- a/go/cmd/internal/docgen/docgen.go
+++ b/go/cmd/internal/docgen/docgen.go
@@ -46,8 +46,10 @@ import (
"fmt"
"io/fs"
"os"
+ "os/exec"
"path/filepath"
"strings"
+ "sync"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
@@ -57,6 +59,10 @@ import (
// written to `dir`. The root command is also renamed to _index.md to remain
// compatible with the vitessio/website content structure expectations.
func GenerateMarkdownTree(cmd *cobra.Command, dir string) error {
+ sha, err := getCommitID("HEAD")
+ if err != nil {
+ return fmt.Errorf("failed to get commit id for HEAD: %w", err)
+ }
switch fi, err := os.Stat(dir); {
case errors.Is(err, fs.ErrNotExist):
if err := os.MkdirAll(dir, 0755); err != nil {
@@ -69,7 +75,7 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error {
}
recursivelyDisableAutoGenTags(cmd)
- if err := doc.GenMarkdownTreeCustom(cmd, dir, frontmatterFilePrepender, linkHandler); err != nil {
+ if err := doc.GenMarkdownTreeCustom(cmd, dir, frontmatterFilePrepender(sha), linkHandler); err != nil {
return err
}
@@ -79,6 +85,120 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error {
return fmt.Errorf("failed to index doc (generated at %s) into proper position (%s): %w", rootDocPath, indexDocPath, err)
}
+ if err := anonymizeHomedir(indexDocPath); err != nil {
+ return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", indexDocPath, err)
+ }
+
+ if err := restructure(dir, dir, cmd.Name(), cmd.Commands()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+/*
+_index.md (aka vtctldclient.md)
+vtctldclient_AddCellInfo.md
+vtctldclient_movetables.md
+vtctldclient_movetables_show.md
+
+becomes
+
+_index.md
+vtctldclient_AddCellInfo.md
+vtctldclient_movetables/
+ _index.md
+ vtctldclient_movetables_show.md
+*/
+
+func restructure(rootDir string, dir string, name string, commands []*cobra.Command) error {
+ for _, cmd := range commands {
+ fullCmdFilename := strings.Join([]string{name, cmd.Name()}, "_")
+
+ children := cmd.Commands()
+
+ switch {
+ case len(children) > 0:
+ // Command (top-level or not) with children.
+ // 1. Set up a directory for its children.
+ // 2. Move its doc into that dir as "_index.md"
+ // 3. Restructure its children.
+ cmdDir := filepath.Join(dir, fullCmdFilename)
+ if err := os.MkdirAll(cmdDir, 0755); err != nil {
+ return fmt.Errorf("failed to create subdir for %s: %w", fullCmdFilename, err)
+ }
+
+ indexFile := filepath.Join(cmdDir, "_index.md")
+ if err := os.Rename(filepath.Join(rootDir, fullCmdFilename+".md"), indexFile); err != nil {
+ return fmt.Errorf("failed to move index doc for command %s with children: %w", fullCmdFilename, err)
+ }
+
+ if err := anonymizeHomedir(indexFile); err != nil {
+ return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", indexFile, err)
+ }
+
+ if err := restructure(rootDir, cmdDir, fullCmdFilename, children); err != nil {
+ return fmt.Errorf("failed to restructure child commands for %s: %w", fullCmdFilename, err)
+ }
+ case rootDir != dir:
+ // Sub-command without children.
+ // 1. Move its doc into the directory for its parent, name unchanged.
+ if cmd.Name() == "help" {
+ // all commands with children have their own "help" subcommand,
+ // which we do not generate docs for
+ continue
+ }
+
+ oldName := filepath.Join(rootDir, fullCmdFilename+".md")
+ newName := filepath.Join(dir, fullCmdFilename+".md")
+
+ if err := os.Rename(oldName, newName); err != nil {
+ return fmt.Errorf("failed to move child command %s to its parent's dir: %w", fullCmdFilename, err)
+ }
+
+ sed := newParentLinkSedCommand(name, newName)
+ if out, err := sed.CombinedOutput(); err != nil {
+ return fmt.Errorf("failed to rewrite links to parent command in child %s: %w (extra: %s)", newName, err, out)
+ }
+
+ if err := anonymizeHomedir(newName); err != nil {
+ return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", newName, err)
+ }
+ default:
+ // Top-level command without children. Nothing to restructure.
+ continue
+ }
+ }
+
+ return nil
+}
+
+func newParentLinkSedCommand(parent string, file string) *exec.Cmd {
+ return exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:(./%s/):(../):i", parent), file)
+}
+
+var (
+ wd string
+ once sync.Once
+)
+
+func anonymizeHomedir(file string) (err error) {
+ once.Do(func() {
+ // Only do this once per run.
+ wd, err = os.Getwd()
+ })
+ if err != nil {
+ return err
+ }
+
+ // We're replacing the stuff inside the square brackets in the example sed
+ // below:
+ // 's:Paths to search for config files in. (default \[.*\])$:Paths to search for config files in. (default \[$WORKDIR\]):'
+ sed := exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:%s:$WORKDIR:i", wd), file)
+ if out, err := sed.CombinedOutput(); err != nil {
+ return fmt.Errorf("%w: %s", err, out)
+ }
+
return nil
}
@@ -91,22 +211,37 @@ func recursivelyDisableAutoGenTags(root *cobra.Command) {
}
}
+func getCommitID(ref string) (string, error) {
+ gitShow := exec.Command("git", "show", "--pretty=format:%H", "--no-patch", ref)
+ out, err := gitShow.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(out), nil
+}
+
const frontmatter = `---
title: %s
series: %s
+commit: %s
---
`
-func frontmatterFilePrepender(filename string) string {
- name := filepath.Base(filename)
- base := strings.TrimSuffix(name, filepath.Ext(name))
+func frontmatterFilePrepender(sha string) func(filename string) string {
+ return func(filename string) string {
+ name := filepath.Base(filename)
+ base := strings.TrimSuffix(name, filepath.Ext(name))
- root, cmdName, ok := strings.Cut(base, "_")
- if !ok { // no `_`, so not a subcommand
- cmdName = root
- }
+ root, cmdName, ok := strings.Cut(base, "_")
+ if !ok { // no `_`, so not a subcommand
+ cmdName = root
+ }
+
+ cmdName = strings.ReplaceAll(cmdName, "_", " ")
- return fmt.Sprintf(frontmatter, cmdName, root)
+ return fmt.Sprintf(frontmatter, cmdName, root, sha)
+ }
}
func linkHandler(filename string) string {
diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go
index 057e48d4aa1..9c5710c3418 100644
--- a/go/cmd/mysqlctl/mysqlctl.go
+++ b/go/cmd/mysqlctl/mysqlctl.go
@@ -47,8 +47,6 @@ func init() {
servenv.RegisterDefaultFlags()
servenv.RegisterDefaultSocketFileFlags()
servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
servenv.RegisterServiceMapFlag()
// mysqlctl only starts and stops mysql, only needs dba.
dbconfigs.RegisterFlags(dbconfigs.Dba)
diff --git a/go/cmd/query_analyzer/query_analyzer.go b/go/cmd/query_analyzer/query_analyzer.go
index 087c88fa5f3..2138bde2673 100644
--- a/go/cmd/query_analyzer/query_analyzer.go
+++ b/go/cmd/query_analyzer/query_analyzer.go
@@ -70,6 +70,7 @@ func main() {
acl.RegisterFlags(fs)
servenv.RegisterMySQLServerFlags(fs)
_flag.Parse(fs)
+ logutil.PurgeLogs()
for _, filename := range _flag.Args() {
fmt.Printf("processing: %s\n", filename)
if err := processFile(filename); err != nil {
diff --git a/go/cmd/rulesctl/cmd/main.go b/go/cmd/rulesctl/cmd/main.go
index d100b8c8680..1b5ab30acfd 100644
--- a/go/cmd/rulesctl/cmd/main.go
+++ b/go/cmd/rulesctl/cmd/main.go
@@ -4,6 +4,7 @@ import (
"github.com/spf13/cobra"
_flag "vitess.io/vitess/go/internal/flag"
+ "vitess.io/vitess/go/vt/logutil"
)
var configFile string
@@ -14,6 +15,7 @@ func Main() *cobra.Command {
Args: cobra.NoArgs,
PreRun: func(cmd *cobra.Command, args []string) {
_flag.TrickGlog()
+ logutil.PurgeLogs()
},
Run: func(cmd *cobra.Command, _ []string) { cmd.Help() },
}
diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go
index 2720f701379..210e2edb918 100644
--- a/go/cmd/vtadmin/main.go
+++ b/go/cmd/vtadmin/main.go
@@ -26,6 +26,7 @@ import (
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtadmin"
"vitess.io/vitess/go/vt/vtadmin/cache"
@@ -58,6 +59,7 @@ var (
Use: "vtadmin",
PreRun: func(cmd *cobra.Command, args []string) {
_flag.TrickGlog()
+ logutil.PurgeLogs()
if opts.EnableTracing || httpOpts.EnableTracing {
startTracing(cmd)
@@ -168,7 +170,7 @@ func main() {
rootCmd.Flags().BoolVar(&httpOpts.EnableTracing, "http-tracing", false, "whether to enable tracing on the HTTP server")
// gRPC server flags
- rootCmd.Flags().BoolVar(&opts.AllowReflection, "grpc-allow-reflection", false, "whether to register the gRPC server for reflection; this is required to use tools like `grpc_cli`")
+ rootCmd.Flags().BoolVar(&opts.AllowReflection, "grpc-allow-reflection", false, "whether to register the gRPC server for reflection; this is required to use tools like grpc_cli")
rootCmd.Flags().BoolVar(&opts.EnableChannelz, "grpc-enable-channelz", false, "whether to enable the channelz service on the gRPC server")
// HTTP server flags
diff --git a/go/cmd/vtbench/vtbench.go b/go/cmd/vtbench/vtbench.go
index 13c024fdd8e..d1b67cc4314 100644
--- a/go/cmd/vtbench/vtbench.go
+++ b/go/cmd/vtbench/vtbench.go
@@ -103,10 +103,7 @@ func initFlags(fs *pflag.FlagSet) {
fs.StringVar(&sql, "sql", sql, "SQL statement to execute")
fs.IntVar(&threads, "threads", threads, "Number of parallel threads to run")
fs.IntVar(&count, "count", count, "Number of queries per thread")
-
grpccommon.RegisterFlags(fs)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
acl.RegisterFlags(fs)
servenv.RegisterMySQLServerFlags(fs)
}
diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go
index 4295766723a..adc060d7737 100644
--- a/go/cmd/vtclient/vtclient.go
+++ b/go/cmd/vtclient/vtclient.go
@@ -184,6 +184,8 @@ func run() (*results, error) {
_flag.Parse(fs)
args := _flag.Args()
+ logutil.PurgeLogs()
+
if len(args) == 0 {
pflag.Usage()
return nil, errors.New("no arguments provided. See usage above")
diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go
index acbea8ff490..ff52c284216 100644
--- a/go/cmd/vtcombo/main.go
+++ b/go/cmd/vtcombo/main.go
@@ -346,3 +346,8 @@ func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) err
func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error {
return nil
}
+
+// SemiSyncExtensionLoaded implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) {
+ return true, nil
+}
diff --git a/go/cmd/vtctldclient/command/backups.go b/go/cmd/vtctldclient/command/backups.go
index 53aac5b51bb..12789b4eddb 100644
--- a/go/cmd/vtctldclient/command/backups.go
+++ b/go/cmd/vtctldclient/command/backups.go
@@ -35,7 +35,7 @@ import (
var (
// Backup makes a Backup gRPC call to a vtctld.
Backup = &cobra.Command{
- Use: "Backup [--concurrency ] [--allow-primary] ",
+ Use: "Backup [--concurrency ] [--allow-primary] [--backup-engine=enginename] ",
Short: "Uses the BackupStorage service on the given tablet to create and store a new backup.",
DisableFlagsInUseLine: true,
Args: cobra.ExactArgs(1),
@@ -70,7 +70,7 @@ If no replica-type tablet can be found, the backup can be taken on the primary i
}
// RestoreFromBackup makes a RestoreFromBackup gRPC call to a vtctld.
RestoreFromBackup = &cobra.Command{
- Use: "RestoreFromBackup [--backup-timestamp|-t ] ",
+ Use: "RestoreFromBackup [--backup-timestamp|-t ] [--allowed-backup-engines=enginename,] ",
Short: "Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`.",
DisableFlagsInUseLine: true,
Args: cobra.ExactArgs(1),
@@ -81,6 +81,7 @@ If no replica-type tablet can be found, the backup can be taken on the primary i
var backupOptions = struct {
AllowPrimary bool
Concurrency uint64
+ BackupEngine string
}{}
func commandBackup(cmd *cobra.Command, args []string) error {
@@ -91,11 +92,17 @@ func commandBackup(cmd *cobra.Command, args []string) error {
cli.FinishedParsing(cmd)
- stream, err := client.Backup(commandCtx, &vtctldatapb.BackupRequest{
+ req := &vtctldatapb.BackupRequest{
TabletAlias: tabletAlias,
AllowPrimary: backupOptions.AllowPrimary,
Concurrency: backupOptions.Concurrency,
- })
+ }
+
+ if backupOptions.BackupEngine != "" {
+ req.BackupEngine = &backupOptions.BackupEngine
+ }
+
+ stream, err := client.Backup(commandCtx, req)
if err != nil {
return err
}
@@ -129,8 +136,8 @@ func commandBackupShard(cmd *cobra.Command, args []string) error {
stream, err := client.BackupShard(commandCtx, &vtctldatapb.BackupShardRequest{
Keyspace: keyspace,
Shard: shard,
- AllowPrimary: backupOptions.AllowPrimary,
- Concurrency: backupOptions.Concurrency,
+ AllowPrimary: backupShardOptions.AllowPrimary,
+ Concurrency: backupShardOptions.Concurrency,
})
if err != nil {
return err
@@ -210,7 +217,8 @@ func commandRemoveBackup(cmd *cobra.Command, args []string) error {
}
var restoreFromBackupOptions = struct {
- BackupTimestamp string
+ BackupTimestamp string
+ AllowedBackupEngines []string
}{}
func commandRestoreFromBackup(cmd *cobra.Command, args []string) error {
@@ -220,7 +228,8 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error {
}
req := &vtctldatapb.RestoreFromBackupRequest{
- TabletAlias: alias,
+ TabletAlias: alias,
+ AllowedBackupEngines: restoreFromBackupOptions.AllowedBackupEngines,
}
if restoreFromBackupOptions.BackupTimestamp != "" {
@@ -255,6 +264,7 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error {
func init() {
Backup.Flags().BoolVar(&backupOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.")
Backup.Flags().Uint64Var(&backupOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.")
+ Backup.Flags().StringVar(&backupOptions.BackupEngine, "backup-engine", "", "Request a specific backup engine for this backup request. Defaults to the preferred backup engine of the target vttablet")
Root.AddCommand(Backup)
BackupShard.Flags().BoolVar(&backupShardOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.")
@@ -268,5 +278,6 @@ func init() {
Root.AddCommand(RemoveBackup)
RestoreFromBackup.Flags().StringVarP(&restoreFromBackupOptions.BackupTimestamp, "backup-timestamp", "t", "", "Use the backup taken at, or closest before, this timestamp. Omit to use the latest backup. Timestamp format is \"YYYY-mm-DD.HHMMSS\".")
+ RestoreFromBackup.Flags().StringSliceVar(&restoreFromBackupOptions.AllowedBackupEngines, "allowed-backup-engines", restoreFromBackupOptions.AllowedBackupEngines, "if set, only backups taken with the specified engines are eligible to be restored")
Root.AddCommand(RestoreFromBackup)
}
diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go
index 985cd227607..6168fa9d85c 100644
--- a/go/cmd/vtctldclient/command/reparents.go
+++ b/go/cmd/vtctldclient/command/reparents.go
@@ -53,6 +53,7 @@ WARNING: this can cause data loss on an already-replicating shard. PlannedRepare
EmergencyReparentShard should be used instead.
`,
DisableFlagsInUseLine: true,
+ Deprecated: "Please use PlannedReparentShard instead",
Args: cobra.ExactArgs(2),
RunE: commandInitShardPrimary,
}
@@ -91,6 +92,7 @@ var emergencyReparentShardOptions = struct {
Force bool
WaitReplicasTimeout time.Duration
NewPrimaryAliasStr string
+ ExpectedPrimaryAliasStr string
IgnoreReplicaAliasStrList []string
PreventCrossCellPromotion bool
}{}
@@ -103,6 +105,7 @@ func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error {
var (
newPrimaryAlias *topodatapb.TabletAlias
+ expectedPrimaryAlias *topodatapb.TabletAlias
ignoreReplicaAliases = make([]*topodatapb.TabletAlias, len(emergencyReparentShardOptions.IgnoreReplicaAliasStrList))
)
@@ -113,6 +116,13 @@ func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error {
}
}
+ if emergencyReparentShardOptions.ExpectedPrimaryAliasStr != "" {
+ expectedPrimaryAlias, err = topoproto.ParseTabletAlias(emergencyReparentShardOptions.ExpectedPrimaryAliasStr)
+ if err != nil {
+ return err
+ }
+ }
+
for i, aliasStr := range emergencyReparentShardOptions.IgnoreReplicaAliasStrList {
alias, err := topoproto.ParseTabletAlias(aliasStr)
if err != nil {
@@ -128,6 +138,7 @@ func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error {
Keyspace: keyspace,
Shard: shard,
NewPrimary: newPrimaryAlias,
+ ExpectedPrimary: expectedPrimaryAlias,
IgnoreReplicas: ignoreReplicaAliases,
WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout),
PreventCrossCellPromotion: emergencyReparentShardOptions.PreventCrossCellPromotion,
@@ -180,9 +191,10 @@ func commandInitShardPrimary(cmd *cobra.Command, args []string) error {
}
var plannedReparentShardOptions = struct {
- NewPrimaryAliasStr string
- AvoidPrimaryAliasStr string
- WaitReplicasTimeout time.Duration
+ NewPrimaryAliasStr string
+ AvoidPrimaryAliasStr string
+ ExpectedPrimaryAliasStr string
+ WaitReplicasTimeout time.Duration
}{}
func commandPlannedReparentShard(cmd *cobra.Command, args []string) error {
@@ -192,8 +204,9 @@ func commandPlannedReparentShard(cmd *cobra.Command, args []string) error {
}
var (
- newPrimaryAlias *topodatapb.TabletAlias
- avoidPrimaryAlias *topodatapb.TabletAlias
+ newPrimaryAlias *topodatapb.TabletAlias
+ avoidPrimaryAlias *topodatapb.TabletAlias
+ expectedPrimaryAlias *topodatapb.TabletAlias
)
if plannedReparentShardOptions.NewPrimaryAliasStr != "" {
@@ -210,6 +223,13 @@ func commandPlannedReparentShard(cmd *cobra.Command, args []string) error {
}
}
+ if plannedReparentShardOptions.ExpectedPrimaryAliasStr != "" {
+ expectedPrimaryAlias, err = topoproto.ParseTabletAlias(plannedReparentShardOptions.ExpectedPrimaryAliasStr)
+ if err != nil {
+ return err
+ }
+ }
+
cli.FinishedParsing(cmd)
resp, err := client.PlannedReparentShard(commandCtx, &vtctldatapb.PlannedReparentShardRequest{
@@ -217,6 +237,7 @@ func commandPlannedReparentShard(cmd *cobra.Command, args []string) error {
Shard: shard,
NewPrimary: newPrimaryAlias,
AvoidPrimary: avoidPrimaryAlias,
+ ExpectedPrimary: expectedPrimaryAlias,
WaitReplicasTimeout: protoutil.DurationToProto(plannedReparentShardOptions.WaitReplicasTimeout),
})
if err != nil {
@@ -279,6 +300,7 @@ func commandTabletExternallyReparented(cmd *cobra.Command, args []string) error
func init() {
EmergencyReparentShard.Flags().DurationVar(&emergencyReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up in reparenting.")
EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary. If not specified, the vtctld will select the best candidate to promote.")
+ EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.ExpectedPrimaryAliasStr, "expected-primary", "", "Alias of a tablet that must be the current primary in order for the reparent to be processed.")
EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.PreventCrossCellPromotion, "prevent-cross-cell-promotion", false, "Only promotes a new primary from the same cell as the previous primary.")
EmergencyReparentShard.Flags().StringSliceVarP(&emergencyReparentShardOptions.IgnoreReplicaAliasStrList, "ignore-replicas", "i", nil, "Comma-separated, repeated list of replica tablet aliases to ignore during the emergency reparent.")
Root.AddCommand(EmergencyReparentShard)
@@ -290,6 +312,7 @@ func init() {
PlannedReparentShard.Flags().DurationVar(&plannedReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up on replication both before and after reparenting.")
PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary.")
PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.AvoidPrimaryAliasStr, "avoid-primary", "", "Alias of a tablet that should not be the primary; i.e. \"reparent to any other tablet if this one is the primary\".")
+ PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.ExpectedPrimaryAliasStr, "expected-primary", "", "Alias of a tablet that must be the current primary in order for the reparent to be processed.")
Root.AddCommand(PlannedReparentShard)
Root.AddCommand(ReparentTablet)
diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go
index 0d53c97c72d..0adce0774d7 100644
--- a/go/cmd/vtctldclient/command/root.go
+++ b/go/cmd/vtctldclient/command/root.go
@@ -19,12 +19,14 @@ package command
import (
"context"
"errors"
+ "fmt"
"io"
"time"
"github.com/spf13/cobra"
"vitess.io/vitess/go/trace"
+ "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtctl/vtctldclient"
)
@@ -48,6 +50,7 @@ var (
// We use PersistentPreRun to set up the tracer, grpc client, and
// command context for every command.
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
+ logutil.PurgeLogs()
traceCloser = trace.StartTracing("vtctldclient")
if VtctldClientProtocol != "local" {
if err := ensureServerArg(); err != nil {
@@ -82,6 +85,26 @@ var (
// propagated).
SilenceErrors: true,
Version: servenv.AppVersion.String(),
+ // If we've reached this function, it means that:
+ //
+ // (1) The user specified some positional arguments, which, for the way
+ // we've structured things can only be a subcommand name, **and**
+ //
+ // (2) Cobra was unable to find a subcommand with that name for which to
+ // call a Run or RunE function.
+ //
+ // From this we conclude that the user was trying to either run a
+ // command that doesn't exist (e.g. "vtctldclient delete-my-data") or
+ // has misspelled a legitimate command (e.g. "vtctldclient StapReplication").
+ // If we think this has happened, return an error, which will get
+ // displayed to the user in main.go along with the usage.
+ RunE: func(cmd *cobra.Command, args []string) error {
+ if cmd.Flags().NArg() > 0 {
+ return fmt.Errorf("unknown command: %s", cmd.Flags().Arg(0))
+ }
+
+ return nil
+ },
}
)
diff --git a/go/cmd/vtctldclient/command/root_test.go b/go/cmd/vtctldclient/command/root_test.go
new file mode 100644
index 00000000000..155fac78705
--- /dev/null
+++ b/go/cmd/vtctldclient/command/root_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command_test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command"
+ "vitess.io/vitess/go/vt/vtctl/localvtctldclient"
+
+ vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
+)
+
+type emptyLocalServer struct {
+ vtctlservicepb.UnimplementedVtctldServer
+}
+
+func TestRoot(t *testing.T) {
+ t.Run("error on unknown subcommand", func(t *testing.T) {
+ args := append([]string{}, os.Args...)
+ protocol := command.VtctldClientProtocol
+ localvtctldclient.SetServer(&emptyLocalServer{})
+
+ t.Cleanup(func() {
+ os.Args = append([]string{}, args...)
+ command.VtctldClientProtocol = protocol
+ })
+
+ os.Args = []string{"vtctldclient", "this-is-bunk"}
+ command.VtctldClientProtocol = "local"
+
+ err := command.Root.Execute()
+ require.Error(t, err, "root command should error on unknown command")
+ assert.Contains(t, err.Error(), "unknown command")
+ })
+}
diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go
index 6a9f3981c43..8abe8bd0b94 100644
--- a/go/cmd/vtctldclient/command/schema.go
+++ b/go/cmd/vtctldclient/command/schema.go
@@ -293,7 +293,7 @@ func init() {
ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", wrangler.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.")
ApplySchema.Flags().BoolVar(&applySchemaOptions.SkipPreflight, "skip-preflight", false, "Skip pre-apply schema checks, and directly forward schema change query to shards.")
ApplySchema.Flags().StringVar(&applySchemaOptions.CallerID, "caller-id", "", "Effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used).")
- ApplySchema.Flags().StringSliceVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.")
+ ApplySchema.Flags().StringArrayVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.")
ApplySchema.Flags().StringVar(&applySchemaOptions.SQLFile, "sql-file", "", "Path to a file containing semicolon-delimited SQL commands to apply. Exactly one of --sql|--sql-file is required.")
Root.AddCommand(ApplySchema)
diff --git a/go/cmd/vtctldclient/command/shards.go b/go/cmd/vtctldclient/command/shards.go
index b670b2ce929..5ce45e07a78 100644
--- a/go/cmd/vtctldclient/command/shards.go
+++ b/go/cmd/vtctldclient/command/shards.go
@@ -153,6 +153,15 @@ Use ctrl-C to interrupt the command and see partial results if needed.`,
Args: cobra.ExactArgs(2),
RunE: commandSourceShardDelete,
}
+
+ // ValidateVersionShard makes a ValidateVersionShard gRPC request to a vtctld.
+ ValidateVersionShard = &cobra.Command{
+ Use: "ValidateVersionShard ",
+ Short: "Validates that the version on the primary matches all of the replicas.",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: commandValidateVersionShard,
+ }
)
var createShardOptions = struct {
@@ -546,6 +555,31 @@ func commandSourceShardDelete(cmd *cobra.Command, args []string) error {
return nil
}
+func commandValidateVersionShard(cmd *cobra.Command, args []string) error {
+ keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0))
+ if err != nil {
+ return err
+ }
+
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.ValidateVersionShard(commandCtx, &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: keyspace,
+ Shard: shard,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
func init() {
CreateShard.Flags().BoolVarP(&createShardOptions.Force, "force", "f", false, "Overwrite an existing shard record, if one exists.")
CreateShard.Flags().BoolVarP(&createShardOptions.IncludeParent, "include-parent", "p", false, "Creates the parent keyspace record if does not already exist.")
@@ -574,6 +608,7 @@ func init() {
Root.AddCommand(ShardReplicationFix)
Root.AddCommand(ShardReplicationPositions)
Root.AddCommand(ShardReplicationRemove)
+ Root.AddCommand(ValidateVersionShard)
SourceShardAdd.Flags().StringVar(&sourceShardAddOptions.KeyRangeStr, "key-range", "", "Key range to use for the SourceShard.")
SourceShardAdd.Flags().StringSliceVar(&sourceShardAddOptions.Tables, "tables", nil, "Comma-separated lists of tables to replicate (for MoveTables). Each table name is either an exact match, or a regular expression of the form \"/regexp/\".")
diff --git a/go/cmd/vtctldclient/command/topology.go b/go/cmd/vtctldclient/command/topology.go
new file mode 100644
index 00000000000..1972baad67f
--- /dev/null
+++ b/go/cmd/vtctldclient/command/topology.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ // GetTopologyPath makes a GetTopologyPath gRPC call to a vtctld.
+ GetTopologyPath = &cobra.Command{
+ Use: "GetTopologyPath ",
+ Short: "Gets the file located at the specified path in the topology server.",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: commandGetTopologyPath,
+ }
+)
+
+func commandGetTopologyPath(cmd *cobra.Command, args []string) error {
+ path := cmd.Flags().Arg(0)
+
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.GetTopologyPath(commandCtx, &vtctldatapb.GetTopologyPathRequest{
+ Path: path,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp.Cell)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
+
+func init() {
+ Root.AddCommand(GetTopologyPath)
+}
diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go
index 2f426891bf9..d043ecf4f95 100644
--- a/go/cmd/vtgate/vtgate.go
+++ b/go/cmd/vtgate/vtgate.go
@@ -28,6 +28,7 @@ import (
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/srvtopo"
@@ -36,18 +37,17 @@ import (
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
var (
- cell = "test_nj"
- tabletTypesToWait, plannerName string
+ cell = ""
+ tabletTypesToWait []topodatapb.TabletType
+ plannerName string
)
func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&cell, "cell", cell, "cell to use")
- fs.StringVar(&tabletTypesToWait, "tablet_types_to_wait", tabletTypesToWait, "wait till connected for specified tablet types during Gateway initialization")
+ fs.Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.")
fs.StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.")
acl.RegisterFlags(fs)
@@ -134,12 +134,7 @@ func main() {
tabletTypes := make([]topodatapb.TabletType, 0, 1)
if len(tabletTypesToWait) != 0 {
- for _, ttStr := range strings.Split(tabletTypesToWait, ",") {
- tt, err := topoproto.ParseTabletType(ttStr)
- if err != nil {
- log.Errorf("unknown tablet type: %v", ttStr)
- continue
- }
+ for _, tt := range tabletTypesToWait {
if topoproto.IsServingType(tt) {
tabletTypes = append(tabletTypes, tt)
}
diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go
index f5418819a05..0d28c8da9bf 100644
--- a/go/cmd/vtorc/main.go
+++ b/go/cmd/vtorc/main.go
@@ -22,8 +22,8 @@ import (
"strings"
_ "github.com/go-sql-driver/mysql"
- _ "github.com/mattn/go-sqlite3"
"github.com/spf13/pflag"
+ _ "modernc.org/sqlite"
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/vt/grpccommon"
diff --git a/go/cmd/vtorc/status.go b/go/cmd/vtorc/status.go
index bdb54963051..a4d8a59d3fc 100644
--- a/go/cmd/vtorc/status.go
+++ b/go/cmd/vtorc/status.go
@@ -24,7 +24,7 @@ import (
// addStatusParts adds UI parts to the /debug/status page of VTOrc
func addStatusParts() {
servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any {
- recoveries, _ := logic.ReadRecentRecoveries("", false, 0)
+ recoveries, _ := logic.ReadRecentRecoveries(false, 0)
return recoveries
})
}
diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/vttestserver_test.go
index 0665d5f9c46..5033c8a708e 100644
--- a/go/cmd/vttestserver/vttestserver_test.go
+++ b/go/cmd/vttestserver/vttestserver_test.go
@@ -108,7 +108,10 @@ func TestPersistentMode(t *testing.T) {
// reboot the persistent cluster
cluster.TearDown()
cluster, err = startPersistentCluster(dir)
- defer cluster.TearDown()
+ defer func() {
+ cluster.PersistentMode = false // Cleanup the tmpdir as we're done
+ cluster.TearDown()
+ }()
assert.NoError(t, err)
// rerun our sanity checks to make sure vschema migrations are run during every startup
@@ -249,7 +252,10 @@ func TestMtlsAuth(t *testing.T) {
fmt.Sprintf("--vtctld_grpc_ca=%s", caCert),
fmt.Sprintf("--grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp"))
assert.NoError(t, err)
- defer cluster.TearDown()
+ defer func() {
+ cluster.PersistentMode = false // Cleanup the tmpdir as we're done
+ cluster.TearDown()
+ }()
// startCluster will apply vschema migrations using vtctl grpc and the clientCert.
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"})
diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go
index 28deb05d527..5cc736ea959 100644
--- a/go/cmd/zk/zkcmd.go
+++ b/go/cmd/zk/zkcmd.go
@@ -40,8 +40,8 @@ import (
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/zk2topo"
- "vitess.io/vitess/go/vt/vtctl"
)
var doc = `
@@ -147,6 +147,7 @@ func main() {
}
pflag.Parse()
+ logutil.PurgeLogs()
if help || pflag.Arg(0) == "help" {
pflag.Usage()
@@ -588,7 +589,7 @@ func cmdCat(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
}
decoded := ""
if decodeProto {
- decoded, err = vtctl.DecodeContent(zkPath, data, false)
+ decoded, err = topo.DecodeContent(zkPath, data, false)
if err != nil {
log.Warningf("cat: cannot proto decode %v: %v", zkPath, err)
decoded = string(data)
diff --git a/go/event/syslogger/fake_logger.go b/go/event/syslogger/fake_logger.go
new file mode 100644
index 00000000000..a3eeaf307c1
--- /dev/null
+++ b/go/event/syslogger/fake_logger.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreedto in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syslogger
+
+import (
+ "fmt"
+
+ "vitess.io/vitess/go/vt/log"
+)
+
+type loggerMsg struct {
+ msg string
+ level string
+}
+type TestLogger struct {
+ logs []loggerMsg
+ savedInfof func(format string, args ...any)
+ savedWarningf func(format string, args ...any)
+ savedErrorf func(format string, args ...any)
+}
+
+func NewTestLogger() *TestLogger {
+ tl := &TestLogger{
+ savedInfof: log.Infof,
+ savedWarningf: log.Warningf,
+ savedErrorf: log.Errorf,
+ }
+ log.Infof = tl.recordInfof
+ log.Warningf = tl.recordWarningf
+ log.Errorf = tl.recordErrorf
+ return tl
+}
+
+func (tl *TestLogger) Close() {
+ log.Infof = tl.savedInfof
+ log.Warningf = tl.savedWarningf
+ log.Errorf = tl.savedErrorf
+}
+
+func (tl *TestLogger) recordInfof(format string, args ...any) {
+ msg := fmt.Sprintf(format, args...)
+ tl.logs = append(tl.logs, loggerMsg{msg, "INFO"})
+ tl.savedInfof(msg)
+}
+
+func (tl *TestLogger) recordWarningf(format string, args ...any) {
+ msg := fmt.Sprintf(format, args...)
+ tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"})
+ tl.savedWarningf(msg)
+}
+
+func (tl *TestLogger) recordErrorf(format string, args ...any) {
+ msg := fmt.Sprintf(format, args...)
+ tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"})
+ tl.savedErrorf(msg)
+}
+
+func (tl *TestLogger) getLog() loggerMsg {
+ if len(tl.logs) > 0 {
+ return tl.logs[len(tl.logs)-1]
+ }
+ return loggerMsg{"no logs!", "ERROR"}
+}
+
+func (tl *TestLogger) GetAllLogs() []string {
+ var logs []string
+ for _, l := range tl.logs {
+ logs = append(logs, l.level+":"+l.msg)
+ }
+ return logs
+}
diff --git a/go/event/syslogger/syslogger_test.go b/go/event/syslogger/syslogger_test.go
index 6549e4ca8bb..4847fecac2a 100644
--- a/go/event/syslogger/syslogger_test.go
+++ b/go/event/syslogger/syslogger_test.go
@@ -23,7 +23,6 @@ import (
"testing"
"vitess.io/vitess/go/event"
- "vitess.io/vitess/go/vt/log"
)
type TestEvent struct {
@@ -63,60 +62,6 @@ func (fw *fakeWriter) Info(msg string) error { return fw.write(syslog.LOG_INF
func (fw *fakeWriter) Notice(msg string) error { return fw.write(syslog.LOG_NOTICE, msg) }
func (fw *fakeWriter) Warning(msg string) error { return fw.write(syslog.LOG_WARNING, msg) }
-type loggerMsg struct {
- msg string
- level string
-}
-type testLogger struct {
- logs []loggerMsg
- savedInfof func(format string, args ...any)
- savedWarningf func(format string, args ...any)
- savedErrorf func(format string, args ...any)
-}
-
-func newTestLogger() *testLogger {
- tl := &testLogger{
- savedInfof: log.Infof,
- savedWarningf: log.Warningf,
- savedErrorf: log.Errorf,
- }
- log.Infof = tl.recordInfof
- log.Warningf = tl.recordWarningf
- log.Errorf = tl.recordErrorf
- return tl
-}
-
-func (tl *testLogger) Close() {
- log.Infof = tl.savedInfof
- log.Warningf = tl.savedWarningf
- log.Errorf = tl.savedErrorf
-}
-
-func (tl *testLogger) recordInfof(format string, args ...any) {
- msg := fmt.Sprintf(format, args...)
- tl.logs = append(tl.logs, loggerMsg{msg, "INFO"})
- tl.savedInfof(msg)
-}
-
-func (tl *testLogger) recordWarningf(format string, args ...any) {
- msg := fmt.Sprintf(format, args...)
- tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"})
- tl.savedWarningf(msg)
-}
-
-func (tl *testLogger) recordErrorf(format string, args ...any) {
- msg := fmt.Sprintf(format, args...)
- tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"})
- tl.savedErrorf(msg)
-}
-
-func (tl *testLogger) getLog() loggerMsg {
- if len(tl.logs) > 0 {
- return tl.logs[len(tl.logs)-1]
- }
- return loggerMsg{"no logs!", "ERROR"}
-}
-
// TestSyslog checks that our callback works.
func TestSyslog(t *testing.T) {
writer = &fakeWriter{}
@@ -132,7 +77,7 @@ func TestSyslog(t *testing.T) {
// TestBadWriter verifies we are still triggering (to normal logs) if
// the syslog connection failed
func TestBadWriter(t *testing.T) {
- tl := newTestLogger()
+ tl := NewTestLogger()
defer tl.Close()
writer = nil
diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt
index cd2758007e2..7a287504950 100644
--- a/go/flags/endtoend/mysqlctl.txt
+++ b/go/flags/endtoend/mysqlctl.txt
@@ -11,113 +11,79 @@ The commands are listed below. Use 'mysqlctl -- {-h, --help}' for comm
position
Global flags:
- --alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
- --db-credentials-file string db credentials file; send SIGHUP to reload this file
- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
- --db-credentials-vault-addr string URL to Vault server
- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
- --db_charset string Character set used for this tablet. (default "utf8mb4")
- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
- --db_dba_password string db dba password
- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
- --db_dba_user string db dba user userKey (default "vt_dba")
- --db_flags uint Flag values as defined by MySQL.
- --db_flavor string Flavor overrid. Valid value is FilePos.
- --db_host string The host name for the tcp connection.
- --db_port int tcp port
- --db_server_name string server name of the DB we are connecting to.
- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
- --db_ssl_ca string connection ssl ca
- --db_ssl_ca_path string connection ssl ca path
- --db_ssl_cert string connection ssl certificate
- --db_ssl_key string connection ssl key
- --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
- --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
- --grpc_initial_conn_window_size int gRPC initial connection window size
- --grpc_initial_window_size int gRPC initial window size
- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
- --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
- --grpc_server_initial_window_size int gRPC server initial window size
- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --mysql_port int MySQL port (default 3306)
- --mysql_server_version string MySQL server version to advertise.
- --mysql_socket string Path to the mysqld socket file
- --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
- --port int port for the server
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
- --socket_file string Local unix socket file to listen on
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --tablet_uid uint Tablet UID (default 41983)
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor overrid. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+ --db_port int tcp port
+ --db_server_name string server name of the DB we are connecting to.
+ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+ --db_ssl_ca string connection ssl ca
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
+ --logtostderr log to standard error instead of files
+ --mysql_port int MySQL port (default 3306)
+ --mysql_server_version string MySQL server version to advertise.
+ --mysql_socket string Path to the mysqld socket file
+ --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --socket_file string Local unix socket file to listen on
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+ --tablet_uid uint Tablet UID (default 41983)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt
index 6ac669ed9c9..f0e2b13b8fb 100644
--- a/go/flags/endtoend/mysqlctld.txt
+++ b/go/flags/endtoend/mysqlctld.txt
@@ -2,16 +2,7 @@ Usage of mysqlctld:
--alsologtostderr log to standard error as well as files
--app_idle_timeout duration Idle timeout for app connections (default 1m0s)
--app_pool_size int Size of the connection pool for app connections (default 40)
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
--catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
--db-credentials-file string db credentials file; send SIGHUP to reload this file
--db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
--db-credentials-vault-addr string URL to Vault server
@@ -43,10 +34,6 @@ Usage of mysqlctld:
--db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
--dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
--dba_pool_size int Size of the connection pool for dba connections (default 20)
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
--grpc_auth_mode string Which auth plugin implementation to use (eg: static)
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
@@ -74,17 +61,19 @@ Usage of mysqlctld:
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--mysql_port int MySQL port (default 3306)
--mysql_server_version string MySQL server version to advertise.
--mysql_socket string Path to the mysqld socket file
--mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
--mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
--pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
@@ -95,18 +84,11 @@ Usage of mysqlctld:
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
--socket_file string Local unix socket file to listen on
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
--tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
--tablet_uid uint Tablet UID (default 41983)
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--wait_time duration How long to wait for mysqld startup or shutdown (default 5m0s)
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt
index 4b158925437..6c93e5d8542 100644
--- a/go/flags/endtoend/vtaclcheck.txt
+++ b/go/flags/endtoend/vtaclcheck.txt
@@ -1,19 +1,21 @@
Usage of vtaclcheck:
- --acl-file string The path of the JSON ACL file to check
- --alsologtostderr log to standard error as well as files
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --static-auth-file string The path of the auth_server_static JSON file to check
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --acl-file string The path of the JSON ACL file to check
+ --alsologtostderr log to standard error as well as files
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
+ --logtostderr log to standard error instead of files
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --static-auth-file string The path of the auth_server_static JSON file to check
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt
index 98f145904fa..8fe2971d3cf 100644
--- a/go/flags/endtoend/vtbackup.txt
+++ b/go/flags/endtoend/vtbackup.txt
@@ -9,10 +9,11 @@ Usage of vtbackup:
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
--backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
+ --compression-engine-name string compressor engine used for compression. (default "pargzip")
+ --compression-level int what level to pass to the compressor. (default 1)
--concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
--consul_auth_static_file string JSON File to read the topos/tokens from.
--db-credentials-file string db credentials file; send SIGHUP to reload this file
@@ -63,6 +64,10 @@ Usage of vtbackup:
--db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
--db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
--detach detached mode - run backups detached from the terminal
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --external-compressor string command with arguments to use when compressing a backup.
+ --external-compressor-extension string extension to use when using an external compressor.
+ --external-decompressor string command with arguments to use when decompressing a backup.
--file_backup_storage_root string Root directory for the file backup storage.
--gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
--gcs_backup_storage_root string Root prefix for all backup-related object names.
@@ -73,6 +78,8 @@ Usage of vtbackup:
--grpc_initial_window_size int gRPC initial window size
--grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
-h, --help display usage and exit
@@ -84,10 +91,13 @@ Usage of vtbackup:
--keep-alive-timeout duration Wait until timeout elapses after a successful backup before shutting down.
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup.
--min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1)
@@ -110,6 +120,12 @@ Usage of vtbackup:
--mycnf_slow_log_path string mysql slow query log path
--mycnf_socket_file string mysql socket file
--mycnf_tmp_dir string mysql tmp directory
+ --mysql-shell-backup-location string location where the backup will be stored
+ --mysql-shell-dump-flags string flags to pass to mysql shell dump utility. This should be a JSON string and will be saved in the MANIFEST (default "{\"threads\": 4}")
+ --mysql-shell-flags string execution flags to pass to mysqlsh binary to be used during dump/load (default "--defaults-file=/dev/null --js -h localhost")
+ --mysql-shell-load-flags string flags to pass to mysql shell load utility. This should be a JSON string (default "{\"threads\": 4, \"loadUsers\": true, \"updateGtidSet\": \"replace\", \"skipBinlog\": true, \"progressFile\": \"\"}")
+ --mysql-shell-should-drain decide if we should drain while taking a backup or continue to serving traffic
+ --mysql-shell-speedup-restore speed up restore by disabling redo logging and double write buffer during the restore process
--mysql_port int mysql port (default 3306)
--mysql_server_version string MySQL server version to advertise.
--mysql_socket string path to the mysql socket
@@ -117,7 +133,7 @@ Usage of vtbackup:
--port int port for the server
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
@@ -131,7 +147,12 @@ Usage of vtbackup:
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
--sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
--tablet_manager_grpc_cert string the cert to use to connect
--tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
@@ -140,9 +161,13 @@ Usage of vtbackup:
--tablet_manager_grpc_key string the key to use to connect
--tablet_manager_grpc_server_name string the server name to use to validate server certificate
--tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
--topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
--topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
--topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
@@ -157,14 +182,14 @@ Usage of vtbackup:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
--xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
--xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
--xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtctlclient.txt b/go/flags/endtoend/vtctlclient.txt
index 8896b7b3f72..a3bef6b2280 100644
--- a/go/flags/endtoend/vtctlclient.txt
+++ b/go/flags/endtoend/vtctlclient.txt
@@ -10,29 +10,33 @@ Usage of vtctlclient:
--grpc_initial_window_size int gRPC initial window size
--grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
-h, --help display usage and exit
--jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--server string server to use for connection
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
--tracer string tracing service to use (default "noop")
--tracing-enable-logging whether to enable logging in the tracing service
--tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
--tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
--vtctld_grpc_ca string the server ca to use to validate servers when connecting
--vtctld_grpc_cert string the cert to use to connect
diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt
index 5895327dc5f..be478e183c4 100644
--- a/go/flags/endtoend/vtctld.txt
+++ b/go/flags/endtoend/vtctld.txt
@@ -1,9 +1,6 @@
Usage of vtctld:
- --action_timeout duration time to wait for an action before resorting to force (default 2m0s)
- --allowed_tablet_types []topodatapb.TabletType Specifies the tablet types this vtgate is allowed to route queries to.
+ --action_timeout duration time to wait for an action before resorting to force (default 1m0s)
--alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
--azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
--azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
--azblob_backup_container_name string Azure Blob Container Name.
@@ -12,7 +9,6 @@ Usage of vtctld:
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
--backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
@@ -25,12 +21,9 @@ Usage of vtctld:
--consul_auth_static_file string JSON File to read the topos/tokens from.
--datadog-agent-host string host to send spans to. if empty, no tracing will be done
--datadog-agent-port string port to send spans to. if empty, no tracing will be done
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
--disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
--durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default "none")
- --enable_realtime_stats Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.
- --enable_vtctld_ui If true, the vtctld web interface will be enabled. Default is true. (default true)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--external-compressor string command with arguments to use when compressing a backup.
--external-compressor-extension string extension to use when using an external compressor.
--external-decompressor string command with arguments to use when decompressing a backup.
@@ -54,6 +47,8 @@ Usage of vtctld:
--grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
--grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
--grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
--grpc_prometheus Enable gRPC monitoring with Prometheus.
@@ -62,31 +57,29 @@ Usage of vtctld:
--grpc_server_initial_window_size int gRPC server initial window size
--grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
--grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+ --healthcheck-dial-concurrency int Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000. (default 1024)
-h, --help display usage and exit
--jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
- --mysql_server_version string MySQL server version to advertise.
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--opentsdb_uri string URI of opentsdb /api/put method
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
--port int port for the server
--pprof strings enable profiling
--proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
--s3_backup_aws_retries int AWS request retries. (default -1)
@@ -105,9 +98,14 @@ Usage of vtctld:
--service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
--sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
--tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.
--tablet_grpc_ca string the server ca to use to validate servers when connecting
--tablet_grpc_cert string the cert to use to connect
--tablet_grpc_crl string the server crl to use to validate server certificates when connecting
@@ -126,9 +124,13 @@ Usage of vtctld:
--tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
--tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
--tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
--topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
--topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
--topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
@@ -151,29 +153,11 @@ Usage of vtctld:
--tracing-enable-logging whether to enable logging in the tracing service
--tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
--tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vtctl_healthcheck_retry_delay duration delay before retrying a failed healthcheck (default 5s)
--vtctl_healthcheck_timeout duration the health check timeout period (default 1m0s)
--vtctl_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
--vtctld_sanitize_log_messages When true, vtctld sanitizes logging.
--vtctld_show_topology_crud Controls the display of the CRUD topology actions in the vtctld UI. (default true)
- --vtgate_grpc_ca string the server ca to use to validate servers when connecting
- --vtgate_grpc_cert string the cert to use to connect
- --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
- --vtgate_grpc_key string the key to use to connect
- --vtgate_grpc_server_name string the server name to use to validate server certificate
- --web_dir string NOT USED, here for backward compatibility
- --web_dir2 string NOT USED, here for backward compatibility
- --workflow_manager_disable strings comma separated list of workflow types to disable
- --workflow_manager_init Initialize the workflow manager in this vtctld instance.
- --workflow_manager_use_election if specified, will use a topology server-based master election to ensure only one workflow manager is active at a time.
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt
index 3c27875d0da..54f2dd8de3f 100644
--- a/go/flags/endtoend/vtctldclient.txt
+++ b/go/flags/endtoend/vtctldclient.txt
@@ -1,6 +1,7 @@
Executes a cluster management command on the remote vtctld server.
Usage:
+ vtctldclient [flags]
vtctldclient [command]
Available Commands:
@@ -45,9 +46,9 @@ Available Commands:
GetTablet Outputs a JSON structure that contains information about the tablet.
GetTabletVersion Print the version of a tablet from its debug vars.
GetTablets Looks up tablets according to filter criteria.
+ GetTopologyPath Gets the file located at the specified path in the topology server.
GetVSchema Prints a JSON representation of a keyspace's topo record.
GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.
- InitShardPrimary Sets the initial primary for the shard.
LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort.
PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations.
PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running.
@@ -83,13 +84,13 @@ Available Commands:
ValidateSchemaKeyspace Validates that the schema on the primary tablet for shard 0 matches the schema on all other tablets in the keyspace.
ValidateShard Validates that all nodes reachable from the specified shard are consistent.
ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace.
+ ValidateVersionShard Validates that the version on the primary matches all of the replicas.
completion Generate the autocompletion script for the specified shell
help Help about any command
Flags:
--action_timeout duration timeout for the total command (default 1h0m0s)
--alsologtostderr log to standard error as well as files
- --emit_stats If set, emit stats to push-based monitoring and stats backends
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
--grpc_enable_tracing Enable gRPC tracing.
@@ -97,28 +98,27 @@ Flags:
--grpc_initial_window_size int gRPC initial window size
--grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
-h, --help help for vtctldclient
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--mysql_server_version string MySQL server version to advertise.
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--server string server to use for connection (required)
- --stats_backend string The name of the registered push-based monitoring/stats backend to use
- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
- --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
- --stats_drop_variables string Variables to be dropped from the list of exported variables.
- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
-v, --v Level log level for V logs
--version version for vtctldclient
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
--vtctld_grpc_ca string the server ca to use to validate servers when connecting
--vtctld_grpc_cert string the cert to use to connect
diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt
index df5fea9d1d6..7b25ec7fa5f 100644
--- a/go/flags/endtoend/vtexplain.txt
+++ b/go/flags/endtoend/vtexplain.txt
@@ -1,36 +1,21 @@
Usage of vtexplain:
--alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--batch-interval duration Interval between logical time slots. (default 10ms)
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
--dbname string Optional database target to override normal routing
--default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
--execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi")
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
-h, --help display usage and exit
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
--ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_queries_to_file string Enable query logging to the specified file
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
--mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
@@ -53,19 +38,14 @@ Usage of vtexplain:
--mysql_server_write_timeout duration connection write timeout (default 0s)
--mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
--mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
--normalize Whether to enable vtgate normalization
--output-mode string Output in human-friendly text or json (default "text")
--planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
--pprof strings enable profiling
--proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
--replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW")
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
--schema string The SQL table schema
--schema-file string Identifies the file that contains the SQL table schema
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
@@ -74,21 +54,9 @@ Usage of vtexplain:
--sql-file string Identifies the file that contains the SQL commands to analyze
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
--sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --topo_global_root string the path of the global topology data in the global topology server
- --topo_global_server_address string the address of the global topology server
- --topo_implementation string the topology implementation to use
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vschema string Identifies the VTGate routing schema
--vschema-file string Identifies the VTGate routing schema file
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt
index 8a45969a20a..2839ba3ea6e 100644
--- a/go/flags/endtoend/vtgate.txt
+++ b/go/flags/endtoend/vtgate.txt
@@ -1,6 +1,8 @@
Usage of vtgate:
- --allowed_tablet_types []topodatapb.TabletType Specifies the tablet types this vtgate is allowed to route queries to.
+ --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.
--alsologtostderr log to standard error as well as files
+ --balancer-keyspaces strings When in balanced mode, a comma-separated list of keyspaces for which to use the balancer (optional)
+ --balancer-vtgate-cells strings When in balanced mode, a comma-separated list of cells that contain vtgates (required)
--buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
--buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events")
--buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
@@ -9,7 +11,7 @@ Usage of vtgate:
--buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
--buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
--catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --cell string cell to use (default "test_nj")
+ --cell string cell to use
--cells_to_watch string comma-separated list of cells for watching tablets
--consul_auth_static_file string JSON File to read the topos/tokens from.
--datadog-agent-host string host to send spans to. if empty, no tracing will be done
@@ -19,6 +21,8 @@ Usage of vtgate:
--default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
--discovery_high_replication_lag_minimum_serving duration Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag. (default 2h0m0s)
--discovery_low_replication_lag duration Threshold below which replication lag is considered low enough to be healthy. (default 30s)
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --enable-balancer Enable the tablet balancer to evenly spread query load for a given tablet type
--enable-partial-keyspace-migration (Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false)
--enable_buffer Enable buffering (stalling) of primary traffic during failovers.
--enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
@@ -31,6 +35,7 @@ Usage of vtgate:
--gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
--gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
--gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
+ --gateway_route_replica_to_rdonly route REPLICA queries to RDONLY tablets as well as REPLICA tablets
--grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.
--grpc_auth_mode string Which auth plugin implementation to use (eg: static)
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
@@ -49,6 +54,8 @@ Usage of vtgate:
--grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
--grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
--grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
--grpc_prometheus Enable gRPC monitoring with Prometheus.
@@ -58,6 +65,7 @@ Usage of vtgate:
--grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
--grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
--grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.
+ --healthcheck-dial-concurrency int Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000. (default 1024)
--healthcheck_retry_delay duration health check retry delay (default 2ms)
--healthcheck_timeout duration the health check timeout period (default 1m0s)
-h, --help display usage and exit
@@ -67,12 +75,15 @@ Usage of vtgate:
--keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
--legacy_replication_lag_algorithm Use the legacy algorithm when selecting vttablets for serving. (default true)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
--lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_queries_to_file string Enable query logging to the specified file
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
--max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
@@ -116,8 +127,9 @@ Usage of vtgate:
--mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish (default 0s)
--mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
--no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
+ --no_vstream_copy when set to true, vstream copy will not be allowed - temporary until we can properly support RDONLY for this
--normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--opentsdb_uri string URI of opentsdb /api/put method
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
@@ -130,8 +142,9 @@ Usage of vtgate:
--querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
--querylog-format string format for query logs ("text" or "json") (default "text")
--querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --querylog-sample-rate float Sample rate for logging queries. Value must be between 0.0 (no logging) and 1.0 (all queries)
--redact-debug-ui-queries redact full queries and bind variables from debug UI
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--retry-count int retry count (default 2)
--schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
--schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
@@ -142,10 +155,17 @@ Usage of vtgate:
--srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
--srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
--srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
--statsd_address string Address for statsd client
--statsd_sample_rate float Sample rate for statsd metrics (default 1)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
--stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
+ --tablet-filter-tags StringMap Specifies a comma-separated list of tablet tags (as key:value pairs) to filter the tablets to watch.
--tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.
--tablet_grpc_ca string the server ca to use to validate servers when connecting
--tablet_grpc_cert string the cert to use to connect
@@ -155,11 +175,15 @@ Usage of vtgate:
--tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
--tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
--tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
- --tablet_types_to_wait string wait till connected for specified tablet types during Gateway initialization
+ --tablet_types_to_wait strings Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.
--tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
--topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
--topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
--topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
@@ -183,9 +207,9 @@ Usage of vtgate:
--tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
--tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
--transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI")
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
--vtctld_addr string address of a vtctld instance
--vtgate-config-terse-errors prevent bind vars from escaping in returned errors
diff --git a/go/flags/endtoend/vtgr.txt b/go/flags/endtoend/vtgr.txt
index d4ed0501d9e..170600d1dd0 100644
--- a/go/flags/endtoend/vtgr.txt
+++ b/go/flags/endtoend/vtgr.txt
@@ -6,6 +6,7 @@ Usage of vtgr:
--db_config string Full path to db config file that will be used by VTGR.
--db_flavor string MySQL flavor override. (default "MySQL56")
--db_port int Local mysql port, set this to enable local fast check.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold.
--gr_port int Port to bootstrap a MySQL group. (default 33061)
--group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.
@@ -16,25 +17,35 @@ Usage of vtgr:
--grpc_initial_window_size int gRPC initial window size
--grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
-h, --help display usage and exit
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--ping_tablet_timeout duration time to wait when we ping a tablet (default 2s)
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--refresh_interval duration Refresh interval to load tablets. (default 10s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--scan_interval duration Scan interval to diagnose and repair. (default 3s)
--scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
--tablet_manager_grpc_cert string the cert to use to connect
--tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
@@ -43,9 +54,13 @@ Usage of vtgr:
--tablet_manager_grpc_key string the key to use to connect
--tablet_manager_grpc_server_name string the server name to use to validate server certificate
--tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
--topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
--topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
--topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
@@ -60,7 +75,7 @@ Usage of vtgr:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vtgr_config string Config file for vtgr.
diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt
index e8270b9f5e7..142aafb835a 100644
--- a/go/flags/endtoend/vtorc.txt
+++ b/go/flags/endtoend/vtorc.txt
@@ -8,6 +8,7 @@ Usage of vtorc:
--clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
--config string config file name
--consul_auth_static_file string JSON File to read the topos/tokens from.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
--grpc_enable_tracing Enable gRPC tracing.
@@ -15,6 +16,8 @@ Usage of vtorc:
--grpc_initial_window_size int gRPC initial window size
--grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
-h, --help display usage and exit
@@ -22,13 +25,15 @@ Usage of vtorc:
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --lock-shard-timeout duration Duration for which a shard lock is held when running a recovery (default 30s)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
--port int port for the server
@@ -38,12 +43,18 @@ Usage of vtorc:
--reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s)
--recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s)
--recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--shutdown_wait_time duration Maximum time to wait for VTOrc to release all the locks that it is holding before shutting down on SIGTERM (default 30s)
--snapshot-topology-interval duration Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours
--sqlite-data-file string SQLite Datafile to use as VTOrc's database (default "file::memory:?mode=memory&cache=shared")
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
--tablet_manager_grpc_cert string the cert to use to connect
--tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
@@ -53,9 +64,13 @@ Usage of vtorc:
--tablet_manager_grpc_server_name string the server name to use to validate server certificate
--tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
--topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s)
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
--topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
--topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
--topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
@@ -73,7 +88,7 @@ Usage of vtorc:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s)
diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt
index 9340e5a0c01..0251d967520 100644
--- a/go/flags/endtoend/vttablet.txt
+++ b/go/flags/endtoend/vttablet.txt
@@ -1,377 +1,405 @@
Usage of vttablet:
- --alsologtostderr log to standard error as well as files
- --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
- --app_pool_size int Size of the connection pool for app connections (default 40)
- --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
- --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
- --azblob_backup_container_name string Azure Blob Container Name.
- --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
- --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
- --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
- --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
- --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
- --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
- --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
- --binlog_host string PITR restore parameter: hostname/IP of binlog server.
- --binlog_password string PITR restore parameter: password of binlog server.
- --binlog_player_grpc_ca string the server ca to use to validate servers when connecting
- --binlog_player_grpc_cert string the cert to use to connect
- --binlog_player_grpc_crl string the server crl to use to validate server certificates when connecting
- --binlog_player_grpc_key string the key to use to connect
- --binlog_player_grpc_server_name string the server name to use to validate server certificate
- --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
- --binlog_port int PITR restore parameter: port of binlog server.
- --binlog_ssl_ca string PITR restore parameter: Filename containing TLS CA certificate to verify binlog server TLS certificate against.
- --binlog_ssl_cert string PITR restore parameter: Filename containing mTLS client certificate to present to binlog server as authentication.
- --binlog_ssl_key string PITR restore parameter: Filename containing mTLS client private key for use in binlog server authentication.
- --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in --binlog_host).
- --binlog_user string PITR restore parameter: username of binlog server.
- --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
- --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
- --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
- --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
- --compression-engine-name string compressor engine used for compression. (default "pargzip")
- --compression-level int what level to pass to the compressor. (default 1)
- --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152)
- --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728)
- --consul_auth_static_file string JSON File to read the topos/tokens from.
- --datadog-agent-host string host to send spans to. if empty, no tracing will be done
- --datadog-agent-port string port to send spans to. if empty, no tracing will be done
- --db-credentials-file string db credentials file; send SIGHUP to reload this file
- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
- --db-credentials-vault-addr string URL to Vault server
- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
- --db_allprivs_password string db allprivs password
- --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
- --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
- --db_app_password string db app password
- --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
- --db_app_user string db app user userKey (default "vt_app")
- --db_appdebug_password string db appdebug password
- --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
- --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
- --db_charset string Character set used for this tablet. (default "utf8mb4")
- --db_conn_query_info enable parsing and processing of QUERY_OK info fields
- --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
- --db_dba_password string db dba password
- --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
- --db_dba_user string db dba user userKey (default "vt_dba")
- --db_erepl_password string db erepl password
- --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
- --db_erepl_user string db erepl user userKey (default "vt_erepl")
- --db_filtered_password string db filtered password
- --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
- --db_filtered_user string db filtered user userKey (default "vt_filtered")
- --db_flags uint Flag values as defined by MySQL.
- --db_flavor string Flavor overrid. Valid value is FilePos.
- --db_host string The host name for the tcp connection.
- --db_port int tcp port
- --db_repl_password string db repl password
- --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
- --db_repl_user string db repl user userKey (default "vt_repl")
- --db_server_name string server name of the DB we are connecting to.
- --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
- --db_ssl_ca string connection ssl ca
- --db_ssl_ca_path string connection ssl ca path
- --db_ssl_cert string connection ssl certificate
- --db_ssl_key string connection ssl key
- --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
- --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
- --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
- --dba_pool_size int Size of the connection pool for dba connections (default 20)
- --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
- --disable-replication-manager Disable replication manager to prevent replication repairs.
- --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
- --enable-consolidator Synonym to -enable_consolidator (default true)
- --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
- --enable-lag-throttler Synonym to -enable_lag_throttler
- --enable-query-plan-field-caching Synonym to -enable_query_plan_field_caching (default true)
- --enable-tx-throttler Synonym to -enable_tx_throttler
- --enable_consolidator This option enables the query consolidator. (default true)
- --enable_consolidator_replicas This option enables the query consolidator only on replicas.
- --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
- --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
- --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
- --enable_query_plan_field_caching (DEPRECATED) This option fetches & caches fields (columns) when storing query plans (default true)
- --enable_replication_reporter Use polling to track replication lag.
- --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
- --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
- --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
- --enforce-tableacl-config if this flag is true, vttablet will fail to start if a valid tableacl config does not exist
- --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
- --external-compressor string command with arguments to use when compressing a backup.
- --external-compressor-extension string extension to use when using an external compressor.
- --external-decompressor string command with arguments to use when decompressing a backup.
- --file_backup_storage_root string Root directory for the file backup storage.
- --filecustomrules string file based custom rule path
- --filecustomrules_watch set up a watch on the target file and reload query rules when it changes
- --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
- --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
- --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
- --gcs_backup_storage_root string Root prefix for all backup-related object names.
- --gh-ost-path string override default gh-ost binary full path
- --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
- --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
- --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
- --grpc_auth_static_password_file string JSON File to read the users/passwords from.
- --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
- --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
- --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
- --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
- --grpc_enable_tracing Enable gRPC tracing.
- --grpc_initial_conn_window_size int gRPC initial connection window size
- --grpc_initial_window_size int gRPC initial window size
- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
- --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
- --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
- --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
- --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
- --grpc_prometheus Enable gRPC monitoring with Prometheus.
- --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
- --grpc_server_initial_conn_window_size int gRPC server initial connection window size
- --grpc_server_initial_window_size int gRPC server initial window size
- --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
- --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
- --health_check_interval duration Interval between health checks (default 20s)
- --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
- --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
- --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
- -h, --help display usage and exit
- --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
- --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
- --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
- --init_db_name_override string (init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_
- --init_keyspace string (init parameter) keyspace to use for this tablet
- --init_populate_metadata (init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.
- --init_shard string (init parameter) shard to use for this tablet
- --init_tablet_type string (init parameter) the tablet type to use for this tablet.
- --init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet
- --init_timeout duration (init parameter) timeout to use for the init phase. (default 1m0s)
- --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_queries Enable query logging to syslog.
- --log_queries_to_file string Enable query logging to the specified file
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256)
- --migration_check_interval duration Interval between migration checks (default 1m0s)
- --mycnf-file string path to my.cnf, if reading all config params from there
- --mycnf_bin_log_path string mysql binlog path
- --mycnf_data_dir string data directory for mysql
- --mycnf_error_log_path string mysql error log path
- --mycnf_general_log_path string mysql general log path
- --mycnf_innodb_data_home_dir string Innodb data home directory
- --mycnf_innodb_log_group_home_dir string Innodb log group home directory
- --mycnf_master_info_file string mysql master.info file
- --mycnf_mysql_port int port mysql is listening on
- --mycnf_pid_file string mysql pid file
- --mycnf_relay_log_index_path string mysql relay log index path
- --mycnf_relay_log_info_path string mysql relay log info path
- --mycnf_relay_log_path string mysql relay log path
- --mycnf_secure_file_priv string mysql path for loading secure files
- --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
- --mycnf_slow_log_path string mysql slow query log path
- --mycnf_socket_file string mysql socket file
- --mycnf_tmp_dir string mysql tmp directory
- --mysql_server_version string MySQL server version to advertise.
- --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
- --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
- --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
- --opentsdb_uri string URI of opentsdb /api/put method
- --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --pitr_gtid_lookup_timeout duration PITR restore parameter: timeout for fetching gtid from timestamp. (default 1m0s)
- --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
- --port int port for the server
- --pprof strings enable profiling
- --pt-osc-path string override default pt-online-schema-change binary full path
- --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s)
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
- --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
- --querylog-format string format for query logs ("text" or "json") (default "text")
- --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
- --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
- --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
- --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
- --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
- --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
- --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
- --queryserver-config-olap-transaction-timeout float query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30)
- --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
- --queryserver-config-pool-prefill-parallelism int (DEPRECATED) query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism.
- --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
- --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
- --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
- --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
- --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
- --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
- --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
- --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
- --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
- --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
- --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
- --queryserver-config-stream-pool-prefill-parallelism int (DEPRECATED) query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism
- --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
- --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
- --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
- --queryserver-config-strict-table-acl only allow queries that pass table acl checks
- --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
- --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
- --queryserver-config-transaction-prefill-parallelism int (DEPRECATED) query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallelism.
- --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
- --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
- --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
- --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
- --queryserver-enable-settings-pool Enable pooling of connections with modified system settings
- --queryserver_enable_online_ddl Enable online DDL. (default true)
- --redact-debug-ui-queries redact full queries and bind variables from debug UI
- --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
- --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
- --remote_operation_timeout duration time to wait for a remote operation (default 30s)
- --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
- --restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
- --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
- --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
- --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
- --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
- --s3_backup_aws_region string AWS region to use. (default "us-east-1")
- --s3_backup_aws_retries int AWS request retries. (default -1)
- --s3_backup_force_path_style force the s3 path style.
- --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
- --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
- --s3_backup_storage_bucket string S3 bucket to use for backups.
- --s3_backup_storage_root string root prefix for all backup-related object names.
- --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
- --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
- --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
- --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s)
- --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
- --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
- --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
- --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
- --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
- --srv_topo_timeout duration topo server timeout (default 5s)
- --statsd_address string Address for statsd client
- --statsd_sample_rate float Sample rate for statsd metrics (default 1)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
- --table-acl-config string path to table access checker config file; send SIGHUP to reload this file
- --table-acl-config-reload-interval duration Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload
- --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included) (default "hold,purge,evac,drop")
- --tablet-path string tablet alias
- --tablet_config string YAML file config for tablet
- --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --tablet_grpc_ca string the server ca to use to validate servers when connecting
- --tablet_grpc_cert string the cert to use to connect
- --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
- --tablet_grpc_key string the key to use to connect
- --tablet_grpc_server_name string the server name to use to validate server certificate
- --tablet_hostname string if not empty, this hostname will be assumed instead of trying to resolve it
- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
- --tablet_manager_grpc_cert string the cert to use to connect
- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
- --tablet_manager_grpc_key string the key to use to connect
- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
- --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
- --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
- --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
- --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
- --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica")
- --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
- --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
- --topo_consul_lock_session_ttl string TTL for consul session.
- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
- --topo_global_root string the path of the global topology data in the global topology server
- --topo_global_server_address string the address of the global topology server
- --topo_implementation string the topology implementation to use
- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- --topocustomrule_cell string topo cell for customrules file. (default "global")
- --topocustomrule_path string path for customrules file. Disabled if empty.
- --tracer string tracing service to use (default "noop")
- --tracing-enable-logging whether to enable logging in the tracing service
- --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
- --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
- --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
- --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog")
- --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
- --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
- --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
- --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
- --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
- --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
- --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
- --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
- --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
- --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells
- --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
- --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
- --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
- --use_super_read_only Set super_read_only flag when performing planned failover.
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
- --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
- --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
- --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
- --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
- --vreplication_healthcheck_timeout duration healthcheck timeout (default 1m0s)
- --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
- --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream is updated when idling (default 1)
- --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence
- --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
- --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
- --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
- --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY")
- --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864)
- --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
- --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
- --vtctld_addr string address of a vtctld instance
- --vtgate_protocol string how to talk to vtgate (default "grpc")
- --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/")
- --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear
- --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
- --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
- --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
- --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+ --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
+ --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.
+ --azblob_backup_container_name string Azure Blob Container Name.
+ --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1)
+ --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').
+ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+ --backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
+ --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
+ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --binlog_host string PITR restore parameter: hostname/IP of binlog server.
+ --binlog_password string PITR restore parameter: password of binlog server.
+ --binlog_player_grpc_ca string the server ca to use to validate servers when connecting
+ --binlog_player_grpc_cert string the cert to use to connect
+ --binlog_player_grpc_crl string the server crl to use to validate server certificates when connecting
+ --binlog_player_grpc_key string the key to use to connect
+ --binlog_player_grpc_server_name string the server name to use to validate server certificate
+ --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
+ --binlog_port int PITR restore parameter: port of binlog server.
+ --binlog_ssl_ca string PITR restore parameter: Filename containing TLS CA certificate to verify binlog server TLS certificate against.
+ --binlog_ssl_cert string PITR restore parameter: Filename containing mTLS client certificate to present to binlog server as authentication.
+ --binlog_ssl_key string PITR restore parameter: Filename containing mTLS client private key for use in binlog server authentication.
+ --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in --binlog_host).
+ --binlog_user string PITR restore parameter: username of binlog server.
+ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
+ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json")
+ --compression-engine-name string compressor engine used for compression. (default "pargzip")
+ --compression-level int what level to pass to the compressor. (default 1)
+ --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152)
+ --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728)
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+ --db_allprivs_password string db allprivs password
+ --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
+ --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
+ --db_app_password string db app password
+ --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
+ --db_app_user string db app user userKey (default "vt_app")
+ --db_appdebug_password string db appdebug password
+ --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
+ --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
+ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_erepl_password string db erepl password
+ --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
+ --db_erepl_user string db erepl user userKey (default "vt_erepl")
+ --db_filtered_password string db filtered password
+ --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
+ --db_filtered_user string db filtered user userKey (default "vt_filtered")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor override. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+ --db_port int tcp port
+ --db_repl_password string db repl password
+ --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
+ --db_repl_user string db repl user userKey (default "vt_repl")
+ --db_server_name string server name of the DB we are connecting to.
+ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+ --db_ssl_ca string connection ssl ca
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+ --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+ --disable-replication-manager Disable replication manager to prevent replication repairs.
+ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --enable-consolidator Synonym to -enable_consolidator (default true)
+ --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+ --enable-lag-throttler Synonym to -enable_lag_throttler
+ --enable-per-workload-table-metrics If true, query counts and query error metrics include a label that identifies the workload
+ --enable-tx-throttler Synonym to -enable_tx_throttler
+ --enable_consolidator This option enables the query consolidator. (default true)
+ --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+ --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+ --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+ --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
+ --enable_replication_reporter Use polling to track replication lag.
+ --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+ --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+ --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+ --enforce-tableacl-config if this flag is true, vttablet will fail to start if a valid tableacl config does not exist
+ --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+ --external-compressor string command with arguments to use when compressing a backup.
+ --external-compressor-extension string extension to use when using an external compressor.
+ --external-decompressor string command with arguments to use when decompressing a backup.
+ --file_backup_storage_root string Root directory for the file backup storage.
+ --filecustomrules string file based custom rule path
+ --filecustomrules_watch set up a watch on the target file and reload query rules when it changes
+ --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
+ --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
+ --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups.
+ --gcs_backup_storage_root string Root prefix for all backup-related object names.
+ --gh-ost-path string override default gh-ost binary full path
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
+ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combined with the server cert to return the full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+ --health_check_interval duration Interval between health checks (default 20s)
+ --healthcheck-dial-concurrency int Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000. (default 1024)
+ --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks.
+ --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+ --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
+ -h, --help display usage and exit
+ --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+ --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+ --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+ --init_db_name_override string (init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_
+ --init_keyspace string (init parameter) keyspace to use for this tablet
+ --init_populate_metadata (init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.
+ --init_shard string (init parameter) shard to use for this tablet
+ --init_tablet_type string (init parameter) the tablet type to use for this tablet.
+ --init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet
+ --init_timeout duration (init parameter) timeout to use for the init phase. (default 1m0s)
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
+ --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
+ --log_queries Enable query logging to syslog.
+ --log_queries_to_file string Enable query logging to the specified file
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
+ --logtostderr log to standard error instead of files
+ --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256)
+ --migration_check_interval duration Interval between migration checks (default 1m0s)
+ --mycnf-file string path to my.cnf, if reading all config params from there
+ --mycnf_bin_log_path string mysql binlog path
+ --mycnf_data_dir string data directory for mysql
+ --mycnf_error_log_path string mysql error log path
+ --mycnf_general_log_path string mysql general log path
+ --mycnf_innodb_data_home_dir string Innodb data home directory
+ --mycnf_innodb_log_group_home_dir string Innodb log group home directory
+ --mycnf_master_info_file string mysql master.info file
+ --mycnf_mysql_port int port mysql is listening on
+ --mycnf_pid_file string mysql pid file
+ --mycnf_relay_log_index_path string mysql relay log index path
+ --mycnf_relay_log_info_path string mysql relay log info path
+ --mycnf_relay_log_path string mysql relay log path
+ --mycnf_secure_file_priv string mysql path for loading secure files
+ --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
+ --mycnf_slow_log_path string mysql slow query log path
+ --mycnf_socket_file string mysql socket file
+ --mycnf_tmp_dir string mysql tmp directory
+ --mysql-shell-backup-location string location where the backup will be stored
+ --mysql-shell-dump-flags string flags to pass to mysql shell dump utility. This should be a JSON string and will be saved in the MANIFEST (default "{\"threads\": 4}")
+ --mysql-shell-flags string execution flags to pass to mysqlsh binary to be used during dump/load (default "--defaults-file=/dev/null --js -h localhost")
+ --mysql-shell-load-flags string flags to pass to mysql shell load utility. This should be a JSON string (default "{\"threads\": 4, \"loadUsers\": true, \"updateGtidSet\": \"replace\", \"skipBinlog\": true, \"progressFile\": \"\"}")
+ --mysql-shell-should-drain decide if we should drain while taking a backup or continue serving traffic
+ --mysql-shell-speedup-restore speed up restore by disabling redo logging and double write buffer during the restore process
+ --mysql_server_version string MySQL server version to advertise.
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --opentsdb_uri string URI of opentsdb /api/put method
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pitr_gtid_lookup_timeout duration PITR restore parameter: timeout for fetching gtid from timestamp. (default 1m0s)
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+ --pprof strings enable profiling
+ --pt-osc-path string override default pt-online-schema-change binary full path
+ --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s)
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
+ --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+ --querylog-format string format for query logs ("text" or "json") (default "text")
+ --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --querylog-sample-rate float Sample rate for logging queries. Value must be between 0.0 (no logging) and 1.0 (all queries)
+ --queryserver-config-acl-exempt-acl string an acl that is exempt from table acl checking (this acl is free to access any vitess tables).
+ --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+ --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+ --queryserver-config-idle-timeout DurationOrSecondsFloat query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800)
+ --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+ --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
+ --queryserver-config-olap-transaction-timeout DurationOrSecondsFloat query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30)
+ --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+ --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+ --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
+ --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+ --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
+ --queryserver-config-query-pool-timeout DurationOrSecondsFloat query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.
+ --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
+ --queryserver-config-query-timeout DurationOrSecondsFloat query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30)
+ --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
+ --queryserver-config-schema-change-signal-interval DurationOrSecondsFloat query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5)
+ --queryserver-config-schema-reload-time DurationOrSecondsFloat query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800)
+ --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+ --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+ --queryserver-config-stream-pool-timeout DurationOrSecondsFloat query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.
+ --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+ --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+ --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+ --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+ --queryserver-config-transaction-timeout DurationOrSecondsFloat query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30)
+ --queryserver-config-txpool-timeout DurationOrSecondsFloat query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1)
+ --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+ --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+ --queryserver-enable-settings-pool Enable pooling of connections with modified system settings
+ --queryserver_enable_online_ddl Enable online DDL. (default true)
+ --redact-debug-ui-queries redact full queries and bind variables from debug UI
+ --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+ --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
+ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --restore-from-backup-allowed-engines strings (init restore parameter) if set, only backups taken with the specified engines are eligible to be restored
+ --restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
+ --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
+ --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
+ --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
+ --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
+ --s3_backup_aws_region string AWS region to use. (default "us-east-1")
+ --s3_backup_aws_retries int AWS request retries. (default -1)
+ --s3_backup_force_path_style force the s3 path style.
+ --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff")
+ --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).
+ --s3_backup_storage_bucket string S3 bucket to use for backups.
+ --s3_backup_storage_root string root prefix for all backup-related object names.
+ --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections.
+ --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+ --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s)
+ --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+ --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+ --srv_topo_timeout duration topo server timeout (default 5s)
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --statsd_address string Address for statsd client
+ --statsd_sample_rate float Sample rate for statsd metrics (default 1)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
+ --table-acl-config string path to table access checker config file; send SIGHUP to reload this file
+ --table-acl-config-reload-interval duration Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload
+ --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included) (default "hold,purge,evac,drop")
+ --tablet-path string tablet alias
+ --tablet_config string YAML file config for tablet
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_grpc_key string the key to use to connect
+ --tablet_grpc_server_name string the server name to use to validate server certificate
+ --tablet_hostname string if not empty, this hostname will be assumed instead of trying to resolve it
+ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_manager_grpc_cert string the cert to use to connect
+ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_manager_grpc_key string the key to use to connect
+ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
+ --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
+ --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
+ --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
+ --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}")
+ --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
+ --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
+ --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308)
+ --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica")
+ --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+ --topo_global_root string the path of the global topology data in the global topology server
+ --topo_global_server_address string the address of the global topology server
+ --topo_implementation string the topology implementation to use
+ --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
+ --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
+ --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
+ --topo_read_concurrency int Concurrency of topo reads. (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+ --topocustomrule_cell string topo cell for customrules file. (default "global")
+ --topocustomrule_path string path for customrules file. Disabled if empty.
+ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
+ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
+ --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+ --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog")
+ --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+ --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+ --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+ --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+ --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9")
+ --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100)
+ --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests.
+ --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells
+ --tx-throttler-tablet-types strings A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly. (default replica)
+ --tx-throttler-topo-refresh-interval duration The rate that the transaction throttler will refresh the topology to find cells. (default 5m0s)
+ --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9")
+ --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+ --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+ --use_super_read_only Set super_read_only flag when performing planned failover.
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
+ --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+ --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+ --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+ --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1)
+ --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
+ --vreplication_healthcheck_timeout duration healthcheck timeout (default 1m0s)
+ --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+ --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream is updated when idling (default 1)
+ --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence
+ --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+ --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+ --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication
+ --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY")
+ --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864)
+ --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+ --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+ --vtctld_addr string address of a vtctld instance
+ --vtgate_protocol string how to talk to vtgate (default "grpc")
+ --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/")
+ --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear
+ --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
+ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt
index 384ebd7fe06..ea8510af70d 100644
--- a/go/flags/endtoend/vttestserver.txt
+++ b/go/flags/endtoend/vttestserver.txt
@@ -5,7 +5,6 @@ Usage of vttestserver:
--backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
--backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
--backup_storage_compress if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data. (default true)
- --backup_storage_hook string if set, we send the contents of the backup files through this hook.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
--builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
--builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
@@ -48,6 +47,8 @@ Usage of vttestserver:
--grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
--grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
--grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_recv_size int Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.
+ --grpc_max_message_send_size int Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
--grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
--grpc_prometheus Enable gRPC monitoring with Prometheus.
@@ -62,13 +63,21 @@ Usage of vttestserver:
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--keyspaces strings Comma separated list of keyspaces (default [test_keyspace])
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
--log_dir string If non-empty, write log files in this directory
--log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
--logtostderr log to standard error instead of files
--max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000)
--min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000)
+ --mysql-shell-backup-location string location where the backup will be stored
+ --mysql-shell-dump-flags string flags to pass to mysql shell dump utility. This should be a JSON string and will be saved in the MANIFEST (default "{\"threads\": 4}")
+ --mysql-shell-flags string execution flags to pass to mysqlsh binary to be used during dump/load (default "--defaults-file=/dev/null --js -h localhost")
+ --mysql-shell-load-flags string flags to pass to mysql shell load utility. This should be a JSON string (default "{\"threads\": 4, \"loadUsers\": true, \"updateGtidSet\": \"replace\", \"skipBinlog\": true, \"progressFile\": \"\"}")
+ --mysql-shell-should-drain decide if we should drain while taking a backup or continue to serving traffic
+ --mysql-shell-speedup-restore speed up restore by disabling redo logging and double write buffer during the restore process
--mysql_bind_host string which host to bind vtgate mysql listener to (default "localhost")
--mysql_only If this flag is set only mysql is initialized. The rest of the vitess components are not started. Also, the output specifies the mysql unix socket instead of the vtgate port.
--mysql_server_version string MySQL server version to advertise.
@@ -76,7 +85,7 @@ Usage of vttestserver:
--mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
--null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1)
--num_shards strings Comma separated shard count (one per keyspace) (default [2])
- --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 1ns)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
@@ -97,7 +106,8 @@ Usage of vttestserver:
--snapshot_file string A MySQL DB snapshot file
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
--sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --structured-logging whether to use structured logging (PlanetScale Log) logger or the original (glog) logger
--tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
--tablet_hostname string The hostname to use for the tablet otherwise it will be derived from OS' hostname (default "localhost")
--tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
@@ -108,9 +118,13 @@ Usage of vttestserver:
--tablet_manager_grpc_key string the key to use to connect
--tablet_manager_grpc_server_name string the server name to use to validate server certificate
--tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
+ --topo_consul_allow_stale_reads Allow stale reads from consul servers
+ --topo_consul_idle_conn_timeout duration Maximum amount of time to pool idle connections. (default 1m30s)
--topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
--topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
--topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_max_conns_per_host int Maximum number of consul connections per host. (default 250)
+ --topo_consul_max_idle_conns int Maximum number of idle consul connections. (default 100)
--topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
--topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
--topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
@@ -119,9 +133,9 @@ Usage of vttestserver:
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
--transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC (default "MULTI")
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
--vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate
--vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc")
--vtctld_grpc_ca string the server ca to use to validate servers when connecting
@@ -135,11 +149,11 @@ Usage of vttestserver:
--vtgate_grpc_key string the key to use to connect
--vtgate_grpc_server_name string the server name to use to validate server certificate
--workflow_manager_init Enable workflow manager
- --xbstream_restore_flags string flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
- --xtrabackup_backup_flags string flags to pass to backup command. These should be space separated and will be added to the end of the command
- --xtrabackup_prepare_flags string flags to pass to prepare command. These should be space separated and will be added to the end of the command
- --xtrabackup_root_path string directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
- --xtrabackup_stream_mode string which mode to use if streaming, valid values are tar and xbstream (default "tar")
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
--xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
--xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
--xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/zk.txt b/go/flags/endtoend/zk.txt
index 52bebdf4333..443bf0b9ca2 100644
--- a/go/flags/endtoend/zk.txt
+++ b/go/flags/endtoend/zk.txt
@@ -1,14 +1,8 @@
Usage of zk:
- --emit_stats If set, emit stats to push-based monitoring and stats backends
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --server string server(s) to connect to
- --stats_backend string The name of the registered push-based monitoring/stats backend to use
- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
- --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
- --stats_drop_variables string Variables to be dropped from the list of exported variables.
- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --server string server(s) to connect to
diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt
index e07334f86ac..58712c4e3b7 100644
--- a/go/flags/endtoend/zkctl.txt
+++ b/go/flags/endtoend/zkctl.txt
@@ -1,18 +1,20 @@
Usage of zkctl:
- --alsologtostderr log to standard error as well as files
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
- --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
+ --alsologtostderr log to standard error as well as files
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
+ --logtostderr log to standard error instead of files
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
+ --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
+ --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/go/flags/endtoend/zkctld.txt b/go/flags/endtoend/zkctld.txt
index 9bfec0066f7..d3e775916c4 100644
--- a/go/flags/endtoend/zkctld.txt
+++ b/go/flags/endtoend/zkctld.txt
@@ -1,19 +1,21 @@
Usage of zkctld:
- --alsologtostderr log to standard error as well as files
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- -v, --v Level log level for V logs
- --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
- --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
+ --alsologtostderr log to standard error as well as files
+ -h, --help display usage and exit
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_link string If non-empty, add symbolic links in this directory to the log files
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.
+ --logtostderr log to standard error instead of files
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging
+ --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
+ --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go
index d010ea0bc4f..953cb387944 100644
--- a/go/flagutil/flagutil.go
+++ b/go/flagutil/flagutil.go
@@ -193,6 +193,22 @@ func DualFormatBoolVar(fs *pflag.FlagSet, p *bool, name string, value bool, usag
}
}
+// DualFormatVar creates a flag which supports both dashes and underscores
+func DualFormatVar(fs *pflag.FlagSet, val pflag.Value, name string, usage string) {
+ dashes := strings.Replace(name, "_", "-", -1)
+ underscores := strings.Replace(name, "-", "_", -1)
+
+ fs.Var(val, underscores, usage)
+ if dashes != underscores {
+ fs.Var(val, dashes, fmt.Sprintf("Synonym to -%s", underscores))
+ }
+}
+
+type Value[T any] interface {
+ pflag.Value
+ Get() T
+}
+
// DurationOrIntVar implements pflag.Value for flags that have historically been
// of type IntVar (and then converted to seconds or some other unit) but are
// now transitioning to a proper DurationVar type.
@@ -252,3 +268,24 @@ func (v *DurationOrIntVar) Type() string { return "duration" }
// Value returns the underlying Duration value passed to the flag.
func (v *DurationOrIntVar) Value() time.Duration { return v.val }
+
+type DurationOrSecondsFloatFlag float64
+
+func (set *DurationOrSecondsFloatFlag) Set(s string) error {
+ if dur, err := time.ParseDuration(s); err == nil {
+ *set = DurationOrSecondsFloatFlag(dur.Seconds())
+ } else {
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ *set = DurationOrSecondsFloatFlag(f)
+ }
+ return nil
+}
+
+func (set *DurationOrSecondsFloatFlag) String() string {
+ return strconv.FormatFloat(float64(*set), 'f', -1, 64)
+}
+
+func (set *DurationOrSecondsFloatFlag) Type() string { return "DurationOrSecondsFloat" }
diff --git a/go/flagutil/flagutil_test.go b/go/flagutil/flagutil_test.go
index 4dbba7b832d..9522404684c 100644
--- a/go/flagutil/flagutil_test.go
+++ b/go/flagutil/flagutil_test.go
@@ -159,3 +159,49 @@ func TestDurationOrIntVar(t *testing.T) {
assert.Equal(t, tt.want, flag.Value())
}
}
+
+func TestDurationOrSecondsFloatFlag(t *testing.T) {
+ testCases := []struct {
+ Set string
+ Expected float64
+ ExpectedErr string
+ }{
+ {
+ Set: "1",
+ Expected: 1,
+ },
+ {
+ Set: "0.5",
+ Expected: 0.5,
+ },
+ {
+ Set: "1800",
+ Expected: 1800,
+ },
+ {
+ Set: "50ms",
+ Expected: 0.05,
+ },
+ {
+ Set: "42m",
+ Expected: 2520,
+ },
+ {
+ Set: "wont-parse",
+ ExpectedErr: `strconv.ParseFloat: parsing "wont-parse": invalid syntax`,
+ },
+ }
+
+ for _, testCase := range testCases {
+ testCase := testCase
+ t.Run(testCase.Set, func(t *testing.T) {
+ t.Parallel()
+ var f DurationOrSecondsFloatFlag
+ err := f.Set(testCase.Set)
+ if testCase.ExpectedErr != "" {
+ assert.ErrorContains(t, err, testCase.ExpectedErr)
+ }
+ assert.Equal(t, testCase.Expected, float64(f))
+ })
+ }
+}
diff --git a/go/internal/flag/flag.go b/go/internal/flag/flag.go
index a06f1c63988..5e36941e480 100644
--- a/go/internal/flag/flag.go
+++ b/go/internal/flag/flag.go
@@ -29,12 +29,15 @@ import (
"os"
"reflect"
"strings"
+ "sync"
flag "github.com/spf13/pflag"
"vitess.io/vitess/go/vt/log"
)
+var flagsMu sync.Mutex
+
// Parse wraps the standard library's flag.Parse to perform some sanity checking
// and issue deprecation warnings in advance of our move to pflag.
//
@@ -44,6 +47,9 @@ import (
//
// See VEP-4, phase 1 for details: https://github.com/vitessio/enhancements/blob/c766ea905e55409cddeb666d6073cd2ac4c9783e/veps/vep-4.md#phase-1-preparation
func Parse(fs *flag.FlagSet) {
+ flagsMu.Lock()
+ defer flagsMu.Unlock()
+ preventGlogVFlagFromClobberingVersionFlagShorthand(fs)
fs.AddGoFlagSet(goflag.CommandLine)
if fs.Lookup("help") == nil {
@@ -69,6 +75,19 @@ func Parse(fs *flag.FlagSet) {
flag.Parse()
}
+// IsFlagProvided returns if the given flag has been provided by the user explicitly or not
+func IsFlagProvided(name string) bool {
+ flagsMu.Lock()
+ defer flagsMu.Unlock()
+ found := false
+ flag.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ found = true
+ }
+ })
+ return found
+}
+
// TrickGlog tricks glog into understanding that flags have been parsed.
//
// N.B. Do not delete this function. `glog` is a persnickity package and wants
@@ -96,6 +115,32 @@ func TrickGlog() {
os.Args = append(os.Args, args...)
}
+// The default behavior of PFlagFromGoFlag (which is called on each flag when
+// calling AddGoFlagSet) is to allow any flags with single-character names to be
+// accessible both as, for example, `-v` and `--v`.
+//
+// This prevents us from exposing version via `--version|-v` (pflag will actually
+// panic when it goes to add the glog log-level flag), so we intervene to blank
+// out the Shorthand for _just_ that flag before adding the rest of the goflags
+// to a particular pflag FlagSet.
+//
+// IMPORTANT: This must be called prior to AddGoFlagSet in both Parse and
+// ParseFlagsForTest.
+func preventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) {
+ // N.B. we use goflag.Lookup instead of this package's Lookup, because we
+ // explicitly want to check only the goflags.
+ if f := goflag.Lookup("v"); f != nil {
+ if fs.Lookup("v") != nil { // This check is exactly what AddGoFlagSet does.
+ return
+ }
+
+ pf := flag.PFlagFromGoFlag(f)
+ pf.Shorthand = ""
+
+ fs.AddFlag(pf)
+ }
+}
+
// Usage invokes the current CommandLine's Usage func, or if not overridden,
// "prints a simple header and calls PrintDefaults".
func Usage() {
@@ -105,14 +150,23 @@ func Usage() {
// filterTestFlags returns two slices: the second one has just the flags for `go test` and the first one contains
// the rest of the flags.
const goTestFlagSuffix = "-test"
+const goTestRunFlag = "-test.run"
func filterTestFlags() ([]string, []string) {
args := os.Args
var testFlags []string
var otherArgs []string
+ hasExtraTestRunArg := false
for i := 0; 0 < len(args) && i < len(args); i++ {
- if strings.HasPrefix(args[i], goTestFlagSuffix) {
+ // This additional logic to check for the test.run flag is required for running single unit tests in GoLand,
+ // due to the way it uses "go tool test2json" to run the test. The CLI `go test` specifies the test as "-test.run=TestHeartbeat",
+ // but test2json as "-test.run TestHeartbeat". So in the latter case we need to also add the arg following test.run
+ if strings.HasPrefix(args[i], goTestFlagSuffix) || hasExtraTestRunArg {
+ hasExtraTestRunArg = false
testFlags = append(testFlags, args[i])
+ if args[i] == goTestRunFlag {
+ hasExtraTestRunArg = true
+ }
continue
}
otherArgs = append(otherArgs, args[i])
@@ -124,6 +178,8 @@ func filterTestFlags() ([]string, []string) {
// handle `go test` flags correctly. We need to separately parse the test flags using goflags. Additionally flags
// like test.Short() require that goflag.Parse() is called first.
func ParseFlagsForTest() {
+ flagsMu.Lock()
+ defer flagsMu.Unlock()
// We need to split up the test flags and the regular app pflags.
// Then hand them off the std flags and pflags parsers respectively.
args, testFlags := filterTestFlags()
@@ -135,6 +191,7 @@ func ParseFlagsForTest() {
}
// parse remaining flags including the log-related ones like --alsologtostderr
+ preventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine)
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
}
@@ -154,6 +211,8 @@ func Parsed() bool {
// standard library `flag` CommandLine. If found in the latter, it is converted
// to a pflag.Flag first. If found in neither, this function returns nil.
func Lookup(name string) *flag.Flag {
+ flagsMu.Lock()
+ defer flagsMu.Unlock()
if f := flag.Lookup(name); f != nil {
return f
}
@@ -169,6 +228,8 @@ func Lookup(name string) *flag.Flag {
// removed. If no double-dash was specified on the command-line, this is
// equivalent to flag.Args() from the standard library flag package.
func Args() (args []string) {
+ flagsMu.Lock()
+ defer flagsMu.Unlock()
doubleDashIdx := -1
for i, arg := range flag.Args() {
if arg == "--" {
diff --git a/go/ioutil/writer.go b/go/ioutil/writer.go
new file mode 100644
index 00000000000..80ad87428bc
--- /dev/null
+++ b/go/ioutil/writer.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+MeteredWriteCloser and MeteredWriter are respectively, time-and-byte-tracking
+wrappers around WriteCloser and Writer.
+*/
+
+package ioutil
+
+import (
+ "bytes"
+)
+
+// BytesBufferWriter implements io.WriteCloser using an in-memory buffer.
+type BytesBufferWriter struct {
+ *bytes.Buffer
+}
+
+func (m BytesBufferWriter) Close() error {
+ return nil
+}
+
+func NewBytesBufferWriter() BytesBufferWriter {
+ return BytesBufferWriter{bytes.NewBuffer(nil)}
+}
diff --git a/go/json2/unmarshal.go b/go/json2/unmarshal.go
index 4f2def0473e..fe889c9aba8 100644
--- a/go/json2/unmarshal.go
+++ b/go/json2/unmarshal.go
@@ -52,3 +52,9 @@ func annotate(data []byte, err error) error {
return fmt.Errorf("line: %d, position %d: %v", line, pos, err)
}
+
+// UnmarshalPB is similar to Unmarshal but specifically for proto.Message to add type safety.
+func UnmarshalPB(data []byte, pb proto.Message) error {
+ opts := protojson.UnmarshalOptions{DiscardUnknown: true}
+ return annotate(data, opts.Unmarshal(data, pb))
+}
diff --git a/go/json2/unmarshal_test.go b/go/json2/unmarshal_test.go
index 9b6a6af1ca2..e46c7c6e123 100644
--- a/go/json2/unmarshal_test.go
+++ b/go/json2/unmarshal_test.go
@@ -18,6 +18,10 @@ package json2
import (
"testing"
+
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/emptypb"
)
func TestUnmarshal(t *testing.T) {
@@ -48,3 +52,14 @@ func TestUnmarshal(t *testing.T) {
}
}
}
+
+func TestUnmarshalPB(t *testing.T) {
+ want := &emptypb.Empty{}
+ json, err := protojson.Marshal(want)
+ require.NoError(t, err)
+
+ var got emptypb.Empty
+ err = UnmarshalPB(json, &got)
+ require.NoError(t, err)
+ require.Equal(t, want, &got)
+}
diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go
index ddc1c6e379b..9ac11282bdc 100644
--- a/go/mysql/client_test.go
+++ b/go/mysql/client_test.go
@@ -475,7 +475,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) {
fmt.Printf("Error: %s", err)
- assert.Contains(t, err.Error(), "cannot send HandshakeResponse41: x509:")
+ assert.Contains(t, err.Error(), "cannot send HandshakeResponse41: tls:")
// Now setup proper CA that is valid to verify
params.SslCa = path.Join(root, "ca-cert.pem")
diff --git a/go/mysql/collations/coercion.go b/go/mysql/collations/coercion.go
index 3087975f239..edd1a6289ea 100644
--- a/go/mysql/collations/coercion.go
+++ b/go/mysql/collations/coercion.go
@@ -208,9 +208,16 @@ func (env *Environment) MergeCollations(left, right TypedCollation, opt Coercion
if leftColl == nil || rightColl == nil {
return TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation)
}
+
leftCS := leftColl.Charset()
rightCS := rightColl.Charset()
+ if left.Coercibility == CoerceExplicit && right.Coercibility == CoerceExplicit {
+ if left.Collation != right.Collation {
+ goto cannotCoerce
+ }
+ }
+
if leftCS.Name() == rightCS.Name() {
switch {
case left.Coercibility < right.Coercibility:
diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go
index 486c0c3dc53..58aae5aa979 100644
--- a/go/mysql/collations/integration/coercion_test.go
+++ b/go/mysql/collations/integration/coercion_test.go
@@ -135,6 +135,10 @@ func TestComparisonSemantics(t *testing.T) {
conn := mysqlconn(t)
defer conn.Close()
+ if v, err := conn.ServerVersionAtLeast(8, 0, 31); err != nil || !v {
+ t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31")
+ }
+
for _, coll := range collations.Local().AllCollations() {
text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString))
testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll})
diff --git a/go/mysql/collations/internal/uca/iter_fast_900.go b/go/mysql/collations/internal/uca/iter_fast_900.go
index 1d2aafe0f22..cbe32cfdb70 100644
--- a/go/mysql/collations/internal/uca/iter_fast_900.go
+++ b/go/mysql/collations/internal/uca/iter_fast_900.go
@@ -66,7 +66,7 @@ func (it *FastIterator900) FastForward32(it2 *FastIterator900) int {
p1 := it.input
p2 := it2.input
- var w1, w2 uint32
+ var w1, w2 uint16
for len(p1) >= 4 && len(p2) >= 4 {
dword1 := *(*uint32)(unsafe.Pointer(&p1[0]))
@@ -75,17 +75,20 @@ func (it *FastIterator900) FastForward32(it2 *FastIterator900) int {
if nonascii == 0 {
if dword1 != dword2 {
+ // Use the weight string fast tables for quick weight comparisons;
+ // see (*FastIterator900).NextWeightBlock64 for a description of
+ // the table format
table := it.fastTable
- if w1, w2 = table[p1[0]], table[p2[0]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[0]]), uint16(table[p2[0]]); w1 != w2 {
goto mismatch
}
- if w1, w2 = table[p1[1]], table[p2[1]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[1]]), uint16(table[p2[1]]); w1 != w2 {
goto mismatch
}
- if w1, w2 = table[p1[2]], table[p2[2]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[2]]), uint16(table[p2[2]]); w1 != w2 {
goto mismatch
}
- if w1, w2 = table[p1[3]], table[p2[3]]; w1 != w2 {
+ if w1, w2 = uint16(table[p1[3]]), uint16(table[p2[3]]); w1 != w2 {
goto mismatch
}
}
@@ -114,7 +117,8 @@ mismatch:
it.unicode++
return 0
}
- return int(w1) - int(w2)
+ // The weights must be byte-swapped before comparison because they're stored in big endian
+ return int(bits.ReverseBytes16(w1)) - int(bits.ReverseBytes16(w2))
}
// NextWeightBlock64 takes a byte slice of 16 bytes and fills it with the next
diff --git a/go/mysql/conn.go b/go/mysql/conn.go
index a65f9d117d3..c7492aec35c 100644
--- a/go/mysql/conn.go
+++ b/go/mysql/conn.go
@@ -1483,12 +1483,9 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) {
// session tracking
if statusFlags&ServerSessionStateChanged == ServerSessionStateChanged {
length, ok := data.readLenEncInt()
- if !ok {
- return fail("invalid OK packet session state change length: %v", data)
- }
- // In case we have a zero length string, there's no additional information so
- // we can return the packet.
- if length == 0 {
+ if !ok || length == 0 {
+ // In case we have no more data or a zero length string, there's no additional information so
+ // we can return the packet.
return packetOK, nil
}
diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go
index 327c25a0c29..1b9e7c824e2 100644
--- a/go/mysql/conn_flaky_test.go
+++ b/go/mysql/conn_flaky_test.go
@@ -366,6 +366,11 @@ func TestOkPackets(t *testing.T) {
expectedErr: "invalid OK packet warnings: &{[0 0 0 2 0] 0}",
}, {
dataIn: `
+00000000 FE 00 00 22 40 00 00 |.....|`,
+ dataOut: `00000000 00 00 00 22 40 00 00 00 04 03 02 00 00 |..."@........|`,
+ cc: CapabilityClientProtocol41 | CapabilityClientTransactions | CapabilityClientSessionTrack | CapabilityClientDeprecateEOF,
+ }, {
+ dataIn: `
00000000 00 00 00 02 40 00 00 00 2a 03 28 00 26 66 32 37 |....@...*.(.&f27|
00000010 66 36 39 37 31 2d 30 33 65 37 2d 31 31 65 62 2d |f6971-03e7-11eb-|
00000020 38 35 63 35 2d 39 38 61 66 36 35 61 36 64 63 34 |85c5-98af65a6dc4|
@@ -1029,6 +1034,15 @@ func (t testRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result)
if strings.Contains(query, "panic") {
panic("test panic attack!")
}
+ if strings.Contains(query, "close before rows read") {
+ c.writeFields(selectRowsResult)
+ // We want to close the connection after the fields are written
+ // and read on the client. So we sleep for 100 milliseconds
+ time.Sleep(100 * time.Millisecond)
+ c.Close()
+ return nil
+ }
+
if strings.Contains(query, "twice") {
callback(selectRowsResult)
}
diff --git a/go/mysql/constants.go b/go/mysql/constants.go
index adc6aeacbd3..54c92a9b4b5 100644
--- a/go/mysql/constants.go
+++ b/go/mysql/constants.go
@@ -273,7 +273,7 @@ const (
// Error codes for client-side errors.
// Originally found in include/mysql/errmsg.h and
-// https://dev.mysql.com/doc/refman/5.7/en/error-messages-client.html
+// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html
const (
// CRUnknownError is CR_UNKNOWN_ERROR
CRUnknownError = 2000
@@ -286,6 +286,10 @@ const (
// This is returned if a connection via a TCP socket fails.
CRConnHostError = 2003
+ // CRUnknownHost is CR_UNKNOWN_HOST
+ // This is returned if the host name cannot be resolved.
+ CRUnknownHost = 2005
+
// CRServerGone is CR_SERVER_GONE_ERROR.
// This is returned if the client tries to send a command but it fails.
CRServerGone = 2006
@@ -325,7 +329,7 @@ const (
// Error codes for server-side errors.
// Originally found in include/mysql/mysqld_error.h and
-// https://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html
+// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html
// The below are in sorted order by value, grouped by vterror code they should be bucketed into.
// See above reference for more information on each code.
const (
@@ -532,6 +536,7 @@ const (
ERDataTooLong = 1406
ErrWrongValueForType = 1411
ERWarnDataTruncated = 1265
+ ERNoSuchUser = 1449
ERForbidSchemaChange = 1450
ERDataOutOfRange = 1690
ERInvalidJSONText = 3140
@@ -543,6 +548,9 @@ const (
ERJSONDocumentTooDeep = 3157
ERWrongValue = 1525
+ // max execution time exceeded
+ ERQueryTimeout = 3024
+
ErrCantCreateGeometryObject = 1416
ErrGISDataWrongEndianess = 3055
ErrNotImplementedForCartesianSRS = 3704
@@ -677,8 +685,12 @@ func IsEphemeralError(err error) bool {
CRConnHostError,
CRMalformedPacket,
CRNamedPipeStateError,
+ CRServerHandshakeErr,
+ CRServerGone,
CRServerLost,
CRSSLConnectionError,
+ CRUnknownError,
+ CRUnknownHost,
ERCantCreateThread,
ERDiskFull,
ERForcingClose,
@@ -689,6 +701,7 @@ func IsEphemeralError(err error) bool {
ERInternalError,
ERLockDeadlock,
ERLockWaitTimeout,
+ ERQueryTimeout,
EROutOfMemory,
EROutOfResources,
EROutOfSortMemory,
diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go
index 7840c288dbf..03b43c29742 100644
--- a/go/mysql/fakesqldb/server.go
+++ b/go/mysql/fakesqldb/server.go
@@ -125,6 +125,10 @@ type DB struct {
// if fakesqldb is asked to serve queries or query patterns that it has not been explicitly told about it will
// error out by default. However if you set this flag then any unmatched query results in an empty result
neverFail bool
+
+ // lastError stores the last error in returning a query result.
+ lastErrorMu sync.Mutex
+ lastError error
}
// QueryHandler is the interface used by the DB to simulate executed queries
@@ -176,6 +180,7 @@ func New(t testing.TB) *DB {
connections: make(map[uint32]*mysql.Conn),
queryPatternUserCallback: make(map[*regexp.Regexp]func(string)),
patternData: make(map[string]exprResult),
+ lastErrorMu: sync.Mutex{},
}
db.Handler = db
@@ -249,6 +254,13 @@ func (db *DB) CloseAllConnections() {
}
}
+// LastError gives the last error the DB ran into
+func (db *DB) LastError() error {
+ db.lastErrorMu.Lock()
+ defer db.lastErrorMu.Unlock()
+ return db.lastError
+}
+
// WaitForClose should be used after CloseAllConnections() is closed and
// you want to provoke a MySQL client error with errno 2006.
//
@@ -346,7 +358,14 @@ func (db *DB) WarningCount(c *mysql.Conn) uint16 {
}
// HandleQuery is the default implementation of the QueryHandler interface
-func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) error {
+func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) (err error) {
+ defer func() {
+ if err != nil {
+ db.lastErrorMu.Lock()
+ db.lastError = err
+ db.lastErrorMu.Unlock()
+ }
+ }()
if db.AllowAll {
return callback(&sqltypes.Result{})
}
@@ -417,7 +436,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R
return callback(&sqltypes.Result{})
}
// Nothing matched.
- err := fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", query, db.name)
+ err = fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", query, db.name)
log.Errorf("Query not found: %s:%s", query, debug.Stack())
return err
@@ -594,11 +613,15 @@ func (db *DB) GetQueryCalledNum(query string) int {
// QueryLog returns the query log in a semicomma separated string
func (db *DB) QueryLog() string {
+ db.mu.Lock()
+ defer db.mu.Unlock()
return strings.Join(db.querylog, ";")
}
// ResetQueryLog resets the query log
func (db *DB) ResetQueryLog() {
+ db.mu.Lock()
+ defer db.mu.Unlock()
db.querylog = nil
}
diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go
index ba4982db4d3..8e7ed44b957 100644
--- a/go/mysql/flavor_mysql.go
+++ b/go/mysql/flavor_mysql.go
@@ -341,17 +341,41 @@ GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment`
// We join with a subquery that materializes the data from `information_schema.innodb_sys_tablespaces`
// early for performance reasons. This effectively causes only a single read of `information_schema.innodb_tablespaces`
// per query.
+// Note the following:
+// - We use UNION ALL to deal differently with partitioned tables vs. non-partitioned tables.
+// Originally, the query handled both, but that introduced "WHERE ... OR" conditions that led to poor query
+// optimization. By separating to UNION ALL we remove all "OR" conditions.
+// - We utilize `INFORMATION_SCHEMA`.`TABLES`.`CREATE_OPTIONS` column to do early pruning before the JOIN.
+// - `TABLES`.`TABLE_NAME` has `utf8mb4_0900_ai_ci` collation. `INNODB_TABLESPACES`.`NAME` has `utf8mb3_general_ci`.
+// We normalize the collation to get better query performance (we force the casting at the time of our choosing)
+// - `create_options` is NULL for views, and therefore we need an additional UNION ALL to include views
const TablesWithSize80 = `SELECT t.table_name,
- t.table_type,
- UNIX_TIMESTAMP(t.create_time),
- t.table_comment,
- SUM(i.file_size),
- SUM(i.allocated_size)
-FROM information_schema.tables t
-INNER JOIN information_schema.innodb_tablespaces i
- ON i.name LIKE CONCAT(database(), '/%') AND (i.name = CONCAT(t.table_schema, '/', t.table_name) OR i.name LIKE CONCAT(t.table_schema, '/', t.table_name, '#p#%'))
-WHERE t.table_schema = database()
-GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment`
+ t.table_type,
+ UNIX_TIMESTAMP(t.create_time),
+ t.table_comment,
+ i.file_size,
+ i.allocated_size
+ FROM information_schema.tables t
+ LEFT JOIN information_schema.innodb_tablespaces i
+ ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8_general_ci
+ WHERE
+ t.table_schema = database() AND not t.create_options <=> 'partitioned'
+UNION ALL
+ SELECT
+ t.table_name,
+ t.table_type,
+ UNIX_TIMESTAMP(t.create_time),
+ t.table_comment,
+ SUM(i.file_size),
+ SUM(i.allocated_size)
+ FROM information_schema.tables t
+ LEFT JOIN information_schema.innodb_tablespaces i
+ ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8_general_ci )
+ WHERE
+ t.table_schema = database() AND t.create_options <=> 'partitioned'
+ GROUP BY
+ t.table_schema, t.table_name, t.table_type, t.create_time, t.table_comment
+`
// baseShowTablesWithSizes is part of the Flavor interface.
func (mysqlFlavor56) baseShowTablesWithSizes() string {
diff --git a/go/mysql/query.go b/go/mysql/query.go
index 6818d646c57..f8adb91f60f 100644
--- a/go/mysql/query.go
+++ b/go/mysql/query.go
@@ -416,7 +416,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result,
for {
data, err := c.readEphemeralPacket()
if err != nil {
- return nil, false, 0, err
+ return nil, false, 0, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
}
if c.isEOFPacket(data) {
@@ -573,7 +573,7 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b
}
if prepare.ParamsCount > 0 {
- bitMap, pos, ok = readBytes(payload, pos, int((prepare.ParamsCount+7)/8))
+ bitMap, pos, ok = readBytes(payload, pos, (int(prepare.ParamsCount)+7)/8)
if !ok {
return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading NULL-bitmap failed")
}
diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go
index 5fdcbc8c830..3ec54dad6d7 100644
--- a/go/mysql/query_test.go
+++ b/go/mysql/query_test.go
@@ -375,6 +375,32 @@ func TestComStmtClose(t *testing.T) {
}
}
+// This test has been added to verify that IO errors in a connection lead to SQL Server lost errors
+// So that we end up closing the connection higher up the stack and not reusing it.
+// This test was added in response to a panic that was run into.
+func TestSQLErrorOnServerClose(t *testing.T) {
+ // Create socket pair for the server and client
+ listener, sConn, cConn := createSocketPair(t)
+ defer func() {
+ listener.Close()
+ sConn.Close()
+ cConn.Close()
+ }()
+
+ err := cConn.WriteComQuery("close before rows read")
+ require.NoError(t, err)
+
+ handler := &testRun{t: t}
+ _ = sConn.handleNextCommand(handler)
+
+ // From the server we will receive a field packet which the client will read
+	// At that point, the server crashes and closes the connection.
+ // We should be getting a Connection lost error.
+ _, _, _, err = cConn.ReadQueryResult(100, true)
+ require.Error(t, err)
+ require.True(t, IsConnLostDuringQuery(err), err.Error())
+}
+
func TestQueries(t *testing.T) {
listener, sConn, cConn := createSocketPair(t)
defer func() {
diff --git a/go/mysql/sql_error.go b/go/mysql/sql_error.go
index 22cd2c2ae9e..347c1abcdad 100644
--- a/go/mysql/sql_error.go
+++ b/go/mysql/sql_error.go
@@ -21,6 +21,7 @@ import (
"fmt"
"regexp"
"strconv"
+ "strings"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
@@ -135,7 +136,11 @@ func mapToSQLErrorFromErrorCode(err error, msg string) *SQLError {
ss = SSAccessDeniedError
case vtrpcpb.Code_RESOURCE_EXHAUSTED:
num = demuxResourceExhaustedErrors(err.Error())
- ss = SSClientError
+	// 1041 ER_OUT_OF_RESOURCES has SQLSTATE HY000 as per https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_out_of_resources,
+ // so don't override it here in that case.
+ if num != EROutOfResources {
+ ss = SSClientError
+ }
case vtrpcpb.Code_UNIMPLEMENTED:
num = ERNotSupportedYet
ss = SSClientError
@@ -223,6 +228,8 @@ func demuxResourceExhaustedErrors(msg string) int {
switch {
case isGRPCOverflowRE.Match([]byte(msg)):
return ERNetPacketTooLarge
+ case strings.Contains(msg, "Transaction throttled"):
+ return EROutOfResources
default:
return ERTooManyUserConnections
}
diff --git a/go/mysql/sql_error_test.go b/go/mysql/sql_error_test.go
index c6fe2f65251..e3b6edf47cc 100644
--- a/go/mysql/sql_error_test.go
+++ b/go/mysql/sql_error_test.go
@@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/assert"
)
-func TestDumuxResourceExhaustedErrors(t *testing.T) {
+func TestDemuxResourceExhaustedErrors(t *testing.T) {
type testCase struct {
msg string
want int
@@ -42,6 +42,7 @@ func TestDumuxResourceExhaustedErrors(t *testing.T) {
		// This should be explicitly handled by returning ERNetPacketTooLarge from the executor directly
// and therefore shouldn't need to be teased out of another error.
{"in-memory row count exceeded allowed limit of 13", ERTooManyUserConnections},
+ {"rpc error: code = ResourceExhausted desc = Transaction throttled", EROutOfResources},
}
for _, c := range cases {
@@ -151,6 +152,11 @@ func TestNewSQLErrorFromError(t *testing.T) {
num: ERNoDb,
ss: SSNoDB,
},
+ {
+ err: vterrors.Errorf(vtrpc.Code_RESOURCE_EXHAUSTED, "vttablet: rpc error: code = ResourceExhausted desc = Transaction throttled"),
+ num: EROutOfResources,
+ ss: SSUnknownSQLState,
+ },
}
for _, tc := range tCases {
diff --git a/go/stats/export.go b/go/stats/export.go
index 17218443c87..0a335517a14 100644
--- a/go/stats/export.go
+++ b/go/stats/export.go
@@ -52,18 +52,13 @@ var (
// CommonTags is a comma-separated list of common tags for stats backends
var CommonTags []string
-func init() {
- registerFlags()
-}
-
-func registerFlags() {
- pflag.BoolVar(&emitStats, "emit_stats", emitStats, "If set, emit stats to push-based monitoring and stats backends")
- pflag.DurationVar(&statsEmitPeriod, "stats_emit_period", statsEmitPeriod, "Interval between emitting stats to all registered backends")
- pflag.StringVar(&statsBackend, "stats_backend", statsBackend, "The name of the registered push-based monitoring/stats backend to use")
- pflag.StringVar(&combineDimensions, "stats_combine_dimensions", combineDimensions, `List of dimensions to be combined into a single "all" value in exported stats vars`)
- pflag.StringVar(&dropVariables, "stats_drop_variables", dropVariables, `Variables to be dropped from the list of exported variables.`)
- pflag.StringSliceVar(&CommonTags, "stats_common_tags", CommonTags, `Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2`)
-
+func RegisterFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&emitStats, "emit_stats", emitStats, "If set, emit stats to push-based monitoring and stats backends")
+ fs.DurationVar(&statsEmitPeriod, "stats_emit_period", statsEmitPeriod, "Interval between emitting stats to all registered backends")
+ fs.StringVar(&statsBackend, "stats_backend", statsBackend, "The name of the registered push-based monitoring/stats backend to use")
+ fs.StringVar(&combineDimensions, "stats_combine_dimensions", combineDimensions, `List of dimensions to be combined into a single "all" value in exported stats vars`)
+ fs.StringVar(&dropVariables, "stats_drop_variables", dropVariables, `Variables to be dropped from the list of exported variables.`)
+ fs.StringSliceVar(&CommonTags, "stats_common_tags", CommonTags, `Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2`)
}
// StatsAllStr is the consolidated name if a dimension gets combined.
diff --git a/go/stats/statsd/statsd_test.go b/go/stats/statsd/statsd_test.go
index 982ad321f0e..5fa0901e6b3 100644
--- a/go/stats/statsd/statsd_test.go
+++ b/go/stats/statsd/statsd_test.go
@@ -55,7 +55,7 @@ func TestStatsdCounter(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.counter_name:1|c"
+ expected := "test.counter_name:1|c\n"
assert.Equal(t, result, expected)
}
})
@@ -84,7 +84,7 @@ func TestStatsdGauge(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauge_name:10.000000|g"
+ expected := "test.gauge_name:10|g\n"
assert.Equal(t, result, expected)
}
})
@@ -113,7 +113,7 @@ func TestStatsdGaugeFloat64(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauge_name_f64:3.140000|g"
+ expected := "test.gauge_name_f64:3.14|g\n"
assert.Equal(t, result, expected)
}
})
@@ -143,7 +143,7 @@ func TestStatsdGaugeFunc(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauge_func_name:2.000000|g"
+ expected := "test.gauge_func_name:2|g\n"
assert.Equal(t, result, expected)
}
})
@@ -172,8 +172,8 @@ func TestStatsdCounterDuration(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.counter_duration_name:1.000000|ms"
- assert.Equal(t, result, expected)
+ expected := "test.counter_duration_name:1.000000|ms\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -203,11 +203,12 @@ func TestStatsdCountersWithSingleLabel(t *testing.T) {
result := strings.Split(string(bytes[:n]), "\n")
sort.Strings(result)
expected := []string{
+ "",
"test.counter_with_single_label_name:0|c|#label:tag2",
"test.counter_with_single_label_name:2|c|#label:tag1",
}
for i, res := range result {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
@@ -236,8 +237,8 @@ func TestStatsdCountersWithMultiLabels(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.counter_with_multiple_label_name:1|c|#label1:foo,label2:bar"
- assert.Equal(t, result, expected)
+ expected := "test.counter_with_multiple_label_name:1|c|#label1:foo,label2:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -271,11 +272,12 @@ func TestStatsdCountersFuncWithMultiLabels(t *testing.T) {
result := strings.Split(string(bytes[:n]), "\n")
sort.Strings(result)
expected := []string{
+ "",
"test.counter_func_with_multiple_labels_name:1|c|#label1:foo,label2:bar",
"test.counter_func_with_multiple_labels_name:2|c|#label1:bar,label2:baz",
}
for i, res := range result {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
@@ -304,8 +306,8 @@ func TestStatsdGaugesWithMultiLabels(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauges_with_multiple_label_name:3.000000|g|#label1:foo,label2:bar"
- assert.Equal(t, result, expected)
+ expected := "test.gauges_with_multiple_label_name:3|g|#label1:foo,label2:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -339,11 +341,12 @@ func TestStatsdGaugesFuncWithMultiLabels(t *testing.T) {
result := strings.Split(string(bytes[:n]), "\n")
sort.Strings(result)
expected := []string{
- "test.gauges_func_with_multiple_labels_name:1.000000|g|#label1:foo,label2:bar",
- "test.gauges_func_with_multiple_labels_name:2.000000|g|#label1:bar,label2:baz",
+ "",
+ "test.gauges_func_with_multiple_labels_name:1|g|#label1:foo,label2:bar",
+ "test.gauges_func_with_multiple_labels_name:2|g|#label1:bar,label2:baz",
}
for i, res := range result {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
@@ -372,8 +375,8 @@ func TestStatsdGaugesWithSingleLabel(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.gauges_with_single_label_name:1.000000|g|#label1:bar"
- assert.Equal(t, result, expected)
+ expected := "test.gauges_with_single_label_name:1|g|#label1:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -401,8 +404,8 @@ func TestStatsdMultiTimings(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.multi_timings_name:10.000000|ms|#label1:foo,label2:bar"
- assert.Equal(t, result, expected)
+ expected := "test.multi_timings_name:10.000000|ms|#label1:foo,label2:bar\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -430,8 +433,8 @@ func TestStatsdTimings(t *testing.T) {
t.Fatal(err)
}
result := string(bytes[:n])
- expected := "test.timings_name:2.000000|ms|#label1:foo"
- assert.Equal(t, result, expected)
+ expected := "test.timings_name:2.000000|ms|#label1:foo\n"
+ assert.Equal(t, expected, result)
}
})
if !found {
@@ -462,12 +465,13 @@ func TestStatsdHistogram(t *testing.T) {
}
result := string(bytes[:n])
expected := []string{
- "test.histogram_name:2.000000|h",
- "test.histogram_name:3.000000|h",
- "test.histogram_name:6.000000|h",
+ "test.histogram_name:2|h",
+ "test.histogram_name:3|h",
+ "test.histogram_name:6|h",
+ "",
}
for i, res := range strings.Split(result, "\n") {
- assert.Equal(t, res, expected[i])
+ assert.Equal(t, expected[i], res)
}
}
})
diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go
index 7875ae1146b..4f47353e4e8 100644
--- a/go/streamlog/streamlog.go
+++ b/go/streamlog/streamlog.go
@@ -20,6 +20,7 @@ package streamlog
import (
"fmt"
"io"
+ rand "math/rand"
"net/http"
"net/url"
"os"
@@ -53,6 +54,7 @@ var (
queryLogFilterTag string
queryLogRowThreshold uint64
queryLogFormat = "text"
+ queryLogSampleRate float64
)
func GetRedactDebugUIQueries() bool {
@@ -79,6 +81,10 @@ func SetQueryLogRowThreshold(newQueryLogRowThreshold uint64) {
queryLogRowThreshold = newQueryLogRowThreshold
}
+func SetQueryLogSampleRate(sampleRate float64) {
+ queryLogSampleRate = sampleRate
+}
+
func GetQueryLogFormat() string {
return queryLogFormat
}
@@ -106,6 +112,8 @@ func registerStreamLogFlags(fs *pflag.FlagSet) {
// QueryLogRowThreshold only log queries returning or affecting this many rows
fs.Uint64Var(&queryLogRowThreshold, "querylog-row-threshold", queryLogRowThreshold, "Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.")
+ // QueryLogSampleRate causes a sample of queries to be logged
+ fs.Float64Var(&queryLogSampleRate, "querylog-sample-rate", queryLogSampleRate, "Sample rate for logging queries. Value must be between 0.0 (no logging) and 1.0 (all queries)")
}
const (
@@ -259,9 +267,22 @@ func GetFormatter(logger *StreamLogger) LogFormatter {
}
}
+// shouldSampleQuery returns true if a query should be sampled based on queryLogSampleRate
+func shouldSampleQuery() bool {
+ if queryLogSampleRate <= 0 {
+ return false
+ } else if queryLogSampleRate >= 1 {
+ return true
+ }
+ return rand.Float64() <= queryLogSampleRate
+}
+
// ShouldEmitLog returns whether the log with the given SQL query
// should be emitted or filtered
func ShouldEmitLog(sql string, rowsAffected, rowsReturned uint64) bool {
+ if shouldSampleQuery() {
+ return true
+ }
if queryLogRowThreshold > maxUint64(rowsAffected, rowsReturned) && queryLogFilterTag == "" {
return false
}
diff --git a/go/streamlog/streamlog_flaky_test.go b/go/streamlog/streamlog_flaky_test.go
index 0f2083a8486..98dddcc9e05 100644
--- a/go/streamlog/streamlog_flaky_test.go
+++ b/go/streamlog/streamlog_flaky_test.go
@@ -28,6 +28,9 @@ import (
"syscall"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
type logMessage struct {
@@ -253,3 +256,131 @@ func TestFile(t *testing.T) {
t.Errorf("streamlog file: want %q got %q", want, got)
}
}
+
+func TestShouldSampleQuery(t *testing.T) {
+ queryLogSampleRate = -1
+ assert.False(t, shouldSampleQuery())
+
+ queryLogSampleRate = 0
+ assert.False(t, shouldSampleQuery())
+
+ // for test coverage, can't test a random result
+ queryLogSampleRate = 0.5
+ shouldSampleQuery()
+
+ queryLogSampleRate = 1.0
+ assert.True(t, shouldSampleQuery())
+
+ queryLogSampleRate = 100.0
+ assert.True(t, shouldSampleQuery())
+}
+
+func TestShouldEmitLog(t *testing.T) {
+ origQueryLogFilterTag := queryLogFilterTag
+ origQueryLogRowThreshold := queryLogRowThreshold
+ origQueryLogSampleRate := queryLogSampleRate
+ defer func() {
+ SetQueryLogFilterTag(origQueryLogFilterTag)
+ SetQueryLogRowThreshold(origQueryLogRowThreshold)
+ SetQueryLogSampleRate(origQueryLogSampleRate)
+ }()
+
+ tests := []struct {
+ sql string
+ qLogFilterTag string
+ qLogRowThreshold uint64
+ qLogSampleRate float64
+ rowsAffected uint64
+ rowsReturned uint64
+ ok bool
+ }{
+ {
+ sql: "queryLogThreshold smaller than affected and returned",
+ qLogFilterTag: "",
+ qLogRowThreshold: 2,
+ qLogSampleRate: 0.0,
+ rowsAffected: 7,
+ rowsReturned: 7,
+ ok: true,
+ },
+ {
+ sql: "queryLogThreshold greater than affected and returned",
+ qLogFilterTag: "",
+ qLogRowThreshold: 27,
+ qLogSampleRate: 0.0,
+ rowsAffected: 7,
+ rowsReturned: 17,
+ ok: false,
+ },
+ {
+ sql: "this doesn't contains queryFilterTag: TAG",
+ qLogFilterTag: "special tag",
+ qLogRowThreshold: 10,
+ qLogSampleRate: 0.0,
+ rowsAffected: 7,
+ rowsReturned: 17,
+ ok: false,
+ },
+ {
+ sql: "this contains queryFilterTag: TAG",
+ qLogFilterTag: "TAG",
+ qLogRowThreshold: 0,
+ qLogSampleRate: 0.0,
+ rowsAffected: 7,
+ rowsReturned: 17,
+ ok: true,
+ },
+ {
+ sql: "this contains querySampleRate: 1.0",
+ qLogFilterTag: "",
+ qLogRowThreshold: 0,
+ qLogSampleRate: 1.0,
+ rowsAffected: 7,
+ rowsReturned: 17,
+ ok: true,
+ },
+ {
+ sql: "this contains querySampleRate: 1.0 without expected queryFilterTag",
+ qLogFilterTag: "TAG",
+ qLogRowThreshold: 0,
+ qLogSampleRate: 1.0,
+ rowsAffected: 7,
+ rowsReturned: 17,
+ ok: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.sql, func(t *testing.T) {
+ SetQueryLogFilterTag(tt.qLogFilterTag)
+ SetQueryLogRowThreshold(tt.qLogRowThreshold)
+ SetQueryLogSampleRate(tt.qLogSampleRate)
+
+ require.Equal(t, tt.ok, ShouldEmitLog(tt.sql, tt.rowsAffected, tt.rowsReturned))
+ })
+ }
+}
+
+func BenchmarkShouldEmitLog(b *testing.B) {
+ b.Run("default", func(b *testing.B) {
+ SetQueryLogSampleRate(0.0)
+ for i := 0; i < b.N; i++ {
+ ShouldEmitLog("select * from test where user='someone'", 0, 123)
+ }
+ })
+ b.Run("filter_tag", func(b *testing.B) {
+ SetQueryLogSampleRate(0.0)
+ SetQueryLogFilterTag("LOG_QUERY")
+ defer SetQueryLogFilterTag("")
+ for i := 0; i < b.N; i++ {
+ ShouldEmitLog("select /* LOG_QUERY=1 */ * from test where user='someone'", 0, 123)
+ }
+ })
+ b.Run("50%_sample_rate", func(b *testing.B) {
+ SetQueryLogSampleRate(0.5)
+ defer SetQueryLogSampleRate(0.0)
+ for i := 0; i < b.N; i++ {
+ ShouldEmitLog("select * from test where user='someone'", 0, 123)
+ }
+ })
+}
diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index f67b5fadeed..b9f780c770d 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -66,6 +66,9 @@ func TestTabletInitialBackup(t *testing.T) {
// Restore the Tablets
restore(t, primary, "replica", "NOT_SERVING")
err := localCluster.VtctlclientProcess.ExecuteCommand(
+ "SetReadWrite", primary.Alias)
+ require.Nil(t, err)
+ err = localCluster.VtctlclientProcess.ExecuteCommand(
"TabletExternallyReparented", primary.Alias)
require.Nil(t, err)
restore(t, replica1, "replica", "SERVING")
@@ -152,17 +155,19 @@ func firstBackupTest(t *testing.T, tabletType string) {
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
// check that the restored replica has the right local_metadata
- result, err := replica2.VttabletProcess.QueryTabletWithDB("select * from local_metadata", "_vt")
- require.Nil(t, err)
- require.NotNil(t, result)
- require.NotEmpty(t, result.Rows)
- assert.Equal(t, replica2.Alias, result.Rows[0][1].ToString(), "Alias")
- assert.Equal(t, "ks.0", result.Rows[1][1].ToString(), "ClusterAlias")
- assert.Equal(t, cell, result.Rows[2][1].ToString(), "DataCenter")
- if tabletType == "replica" {
- assert.Equal(t, "neutral", result.Rows[3][1].ToString(), "PromotionRule")
- } else {
- assert.Equal(t, "must_not", result.Rows[3][1].ToString(), "PromotionRule")
+ if localCluster.VtTabletMajorVersion <= 15 {
+ result, err := replica2.VttabletProcess.QueryTabletWithDB("select * from local_metadata", "_vt")
+ require.Nil(t, err)
+ require.NotNil(t, result)
+ require.NotEmpty(t, result.Rows)
+ assert.Equal(t, replica2.Alias, result.Rows[0][1].ToString(), "Alias")
+ assert.Equal(t, "ks.0", result.Rows[1][1].ToString(), "ClusterAlias")
+ assert.Equal(t, cell, result.Rows[2][1].ToString(), "DataCenter")
+ if tabletType == "replica" {
+ assert.Equal(t, "neutral", result.Rows[3][1].ToString(), "PromotionRule")
+ } else {
+ assert.Equal(t, "must_not", result.Rows[3][1].ToString(), "PromotionRule")
+ }
}
removeBackups(t)
diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go
index ce0720e77c4..0ac5cb521bf 100644
--- a/go/test/endtoend/backup/vtbackup/main_test.go
+++ b/go/test/endtoend/backup/vtbackup/main_test.go
@@ -135,6 +135,16 @@ func TestMain(m *testing.M) {
}
}
+ if localCluster.VtTabletMajorVersion >= 16 {
+ // If vttablets are any lower than version 16, then they are running the replication manager.
+ // Running VTOrc and replication manager sometimes creates the situation where VTOrc has set up semi-sync on the primary,
+ // but the replication manager starts replication on the replica without setting semi-sync. This hangs the primary.
+ // Even if VTOrc fixes it, since there is no ongoing traffic, the state remains blocked.
+ if err := localCluster.StartVTOrc(keyspaceName); err != nil {
+ return 1, err
+ }
+ }
+
return m.Run(), nil
}()
diff --git a/go/test/endtoend/backup/vtctlbackup/backup_test.go b/go/test/endtoend/backup/vtctlbackup/backup_test.go
index e8d3ba2c8d7..d52b3554a40 100644
--- a/go/test/endtoend/backup/vtctlbackup/backup_test.go
+++ b/go/test/endtoend/backup/vtctlbackup/backup_test.go
@@ -24,11 +24,12 @@ import (
// TestBackupMain - main tests backup using vtctl commands
func TestBackupMain(t *testing.T) {
- TestBackup(t, Backup, "xbstream", 0, nil, nil)
+ TestBackup(t, BuiltinBackup, "xbstream", 0, nil, nil)
}
func TestBackupMainWithZstdCompression(t *testing.T) {
defer setDefaultCompressionFlag()
+ defer setDefaultCommonArgs()
cDetails := &CompressionDetails{
CompressorEngineName: "zstd",
ExternalCompressorCmd: "zstd",
@@ -36,7 +37,7 @@ func TestBackupMainWithZstdCompression(t *testing.T) {
ExternalDecompressorCmd: "zstd -d",
}
- TestBackup(t, Backup, "xbstream", 0, cDetails, []string{"TestReplicaBackup", "TestPrimaryBackup"})
+ TestBackup(t, BuiltinBackup, "xbstream", 0, cDetails, []string{"TestReplicaBackup", "TestPrimaryBackup"})
}
func setDefaultCompressionFlag() {
diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
index e862e1cd52c..b830500e032 100644
--- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go
+++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
@@ -18,8 +18,10 @@ package vtctlbackup
import (
"bufio"
+ "context"
"encoding/json"
"fmt"
+ "math/rand"
"os"
"os/exec"
"path"
@@ -28,9 +30,15 @@ import (
"testing"
"time"
+ "vitess.io/vitess/go/json2"
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/textutil"
"vitess.io/vitess/go/vt/mysqlctl"
+ "vitess.io/vitess/go/vt/mysqlctl/backupstorage"
"vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"github.com/stretchr/testify/assert"
@@ -42,19 +50,21 @@ import (
// constants for test variants
const (
XtraBackup = iota
- Backup
+ BuiltinBackup
Mysqlctld
- timeout = time.Duration(60 * time.Second)
+ MySQLShell
+ timeout = time.Duration(60 * time.Second)
+ topoConsistencyTimeout = 20 * time.Second
)
var (
- primary *cluster.Vttablet
- replica1 *cluster.Vttablet
- replica2 *cluster.Vttablet
- localCluster *cluster.LocalProcessCluster
- newInitDBFile string
- useXtrabackup bool
- cell = cluster.DefaultCell
+ primary *cluster.Vttablet
+ replica1 *cluster.Vttablet
+ replica2 *cluster.Vttablet
+ localCluster *cluster.LocalProcessCluster
+ newInitDBFile string
+ currentSetupType int
+ cell = cluster.DefaultCell
hostname = "localhost"
keyspaceName = "ks"
@@ -62,16 +72,7 @@ var (
shardKsName = fmt.Sprintf("%s/%s", keyspaceName, shardName)
dbCredentialFile string
shardName = "0"
- commonTabletArg = []string{
- "--vreplication_healthcheck_topology_refresh", "1s",
- "--vreplication_healthcheck_retry_delay", "1s",
- "--vreplication_retry_delay", "1s",
- "--degraded_threshold", "5s",
- "--lock_tables_timeout", "5s",
- "--watch_replication_stream",
- "--enable_replication_reporter",
- "--serving_state_grace_period", "1s",
- }
+ commonTabletArg = getDefaultCommonArgs()
vtInsertTest = `
create table vt_insert_test (
@@ -90,6 +91,7 @@ type CompressionDetails struct {
// LaunchCluster : starts the cluster as per given params.
func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *CompressionDetails) (int, error) {
+ currentSetupType = setupType
localCluster = cluster.NewCluster(cell, hostname)
// Start topo server
@@ -124,9 +126,9 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp
extraArgs := []string{"--db-credentials-file", dbCredentialFile}
commonTabletArg = append(commonTabletArg, "--db-credentials-file", dbCredentialFile)
- // Update arguments for xtrabackup
- if setupType == XtraBackup {
- useXtrabackup = true
+ // Update arguments for different backup engines
+ switch setupType {
+ case XtraBackup:
xtrabackupArgs := []string{
"--backup_engine_implementation", "xtrabackup",
@@ -142,6 +144,18 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp
}
commonTabletArg = append(commonTabletArg, xtrabackupArgs...)
+ case MySQLShell:
+ mysqlShellBackupLocation := path.Join(localCluster.CurrentVTDATAROOT, "backups-mysqlshell")
+ err = os.MkdirAll(mysqlShellBackupLocation, 0o777)
+ if err != nil {
+ return 0, err
+ }
+
+ mysqlShellArgs := []string{
+ "--backup_engine_implementation", "mysqlshell",
+ "--mysql-shell-backup-location", mysqlShellBackupLocation,
+ }
+ commonTabletArg = append(commonTabletArg, mysqlShellArgs...)
}
commonTabletArg = append(commonTabletArg, getCompressorArgs(cDetails)...)
@@ -749,6 +763,23 @@ func terminatedRestore(t *testing.T) {
stopAllTablets()
}
+func checkTabletType(t *testing.T, alias string, tabletType topodata.TabletType) {
+ t.Helper()
+ // for loop for 15 seconds to check if tablet type is correct
+ for i := 0; i < 15; i++ {
+ output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetTablet", alias)
+ require.NoError(t, err)
+ var tabletPB topodata.Tablet
+ err = json2.UnmarshalPB([]byte(output), &tabletPB)
+ require.NoError(t, err)
+ if tabletType == tabletPB.Type {
+ return
+ }
+ time.Sleep(1 * time.Second)
+ }
+ require.Failf(t, "checkTabletType failed.", "Tablet type is not correct. Expected: %v", tabletType)
+}
+
// test_backup will:
// - create a shard with primary and replica1 only
// - run InitShardPrimary
@@ -886,9 +917,8 @@ func verifySemiSyncStatus(t *testing.T, vttablet *cluster.Vttablet, expectedStat
func terminateRestore(t *testing.T) {
stopRestoreMsg := "Copying file 10"
- if useXtrabackup {
+ if currentSetupType == XtraBackup {
stopRestoreMsg = "Restore: Preparing"
- useXtrabackup = false
}
args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", "--", primary.Alias)
@@ -917,3 +947,507 @@ func terminateRestore(t *testing.T) {
}
assert.True(t, found, "Restore message not found")
}
+
+func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, replicaIndex int) (backups []string) {
+ replica := getReplica(t, replicaIndex)
+ numBackups := len(waitForNumBackups(t, -1))
+
+ err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica.Alias)
+ require.NoError(t, err)
+
+ backups = waitForNumBackups(t, numBackups+1)
+ require.NotEmpty(t, backups)
+
+ verifyTabletBackupStats(t, replica.VttabletProcess.GetVars())
+
+ return backups
+}
+
+func GetReplicaPosition(t *testing.T, replicaIndex int) string {
+ replica := getReplica(t, replicaIndex)
+ pos, _ := cluster.GetPrimaryPosition(t, *replica, hostname)
+ return pos
+}
+
+func GetReplicaGtidPurged(t *testing.T, replicaIndex int) string {
+ replica := getReplica(t, replicaIndex)
+ query := "select @@global.gtid_purged as gtid_purged"
+ rs, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ row := rs.Named().Row()
+ require.NotNil(t, row)
+ return row.AsString("gtid_purged", "")
+}
+
+func ReconnectReplicaToPrimary(t *testing.T, replicaIndex int) {
+ query := fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='localhost', SOURCE_PORT=%d, SOURCE_USER='vt_repl', SOURCE_AUTO_POSITION = 1", primary.MySQLPort)
+ replica := getReplica(t, replicaIndex)
+ _, err := replica.VttabletProcess.QueryTablet("stop replica", keyspaceName, true)
+ require.NoError(t, err)
+ _, err = replica.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ _, err = replica.VttabletProcess.QueryTablet("start replica", keyspaceName, true)
+ require.NoError(t, err)
+}
+
+func InsertRowOnPrimary(t *testing.T, hint string) {
+ if hint == "" {
+ hint = textutil.RandomHash()[:12]
+ }
+ query, err := sqlparser.ParseAndBind("insert into vt_insert_test (msg) values (%a)", sqltypes.StringBindVariable(hint))
+ require.NoError(t, err)
+ _, err = primary.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+}
+
+func ReadRowsFromTablet(t *testing.T, tablet *cluster.Vttablet) (msgs []string) {
+ query := "select msg from vt_insert_test"
+ rs, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ for _, row := range rs.Named().Rows {
+ msg, err := row.ToString("msg")
+ require.NoError(t, err)
+ msgs = append(msgs, msg)
+ }
+ return msgs
+}
+
+func ReadRowsFromPrimary(t *testing.T) (msgs []string) {
+ return ReadRowsFromTablet(t, primary)
+}
+
+func getReplica(t *testing.T, replicaIndex int) *cluster.Vttablet {
+ switch replicaIndex {
+ case 0:
+ return replica1
+ case 1:
+ return replica2
+ default:
+ assert.Failf(t, "invalid replica index", "index=%d", replicaIndex)
+ return nil
+ }
+}
+
+func ReadRowsFromReplica(t *testing.T, replicaIndex int) (msgs []string) {
+ return ReadRowsFromTablet(t, getReplica(t, replicaIndex))
+}
+
+// FlushBinaryLogsOnReplica issues `FLUSH BINARY LOGS` the given number of times
+func FlushBinaryLogsOnReplica(t *testing.T, replicaIndex int, count int) {
+ replica := getReplica(t, replicaIndex)
+ query := "flush binary logs"
+ for i := 0; i < count; i++ {
+ _, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ }
+}
+
+// FlushAndPurgeBinaryLogsOnReplica intentionally loses all existing binary logs. It flushes into a new binary log
+// and immediately purges all previous logs.
+// This is used to lose information.
+func FlushAndPurgeBinaryLogsOnReplica(t *testing.T, replicaIndex int) (lastBinlog string) {
+ FlushBinaryLogsOnReplica(t, replicaIndex, 1)
+
+ replica := getReplica(t, replicaIndex)
+ {
+ query := "show binary logs"
+ rs, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ require.NotEmpty(t, rs.Rows)
+ for _, row := range rs.Rows {
+ // binlog file name is first column
+ lastBinlog = row[0].ToString()
+ }
+ }
+ {
+ query, err := sqlparser.ParseAndBind("purge binary logs to %a", sqltypes.StringBindVariable(lastBinlog))
+ require.NoError(t, err)
+ _, err = replica.VttabletProcess.QueryTablet(query, keyspaceName, true)
+ require.NoError(t, err)
+ }
+ return lastBinlog
+}
+
+func readManifestFile(t *testing.T, backupLocation string) (manifest *mysqlctl.BackupManifest) {
+ // reading manifest
+ fullPath := backupLocation + "/MANIFEST"
+ data, err := os.ReadFile(fullPath)
+ require.NoErrorf(t, err, "error while reading MANIFEST %v", err)
+
+ // parsing manifest
+ err = json.Unmarshal(data, &manifest)
+ require.NoErrorf(t, err, "error while parsing MANIFEST %v", err)
+ require.NotNil(t, manifest)
+ return manifest
+}
+
+func TestReplicaFullBackup(t *testing.T, replicaIndex int) (manifest *mysqlctl.BackupManifest) {
+ backups := vtctlBackupReplicaNoDestroyNoWrites(t, replicaIndex)
+
+ backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backups[len(backups)-1]
+ return readManifestFile(t, backupLocation)
+}
+
+// waitForNumBackups waits for GetBackups to list exactly the given expected number.
+// If expectNumBackups < 0 then any response is considered valid
+func waitForNumBackups(t *testing.T, expectNumBackups int) []string {
+ ctx, cancel := context.WithTimeout(context.Background(), topoConsistencyTimeout)
+ defer cancel()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ backups, err := localCluster.ListBackups(shardKsName)
+ require.NoError(t, err)
+ if expectNumBackups < 0 {
+ // any result is valid
+ return backups
+ }
+ if len(backups) == expectNumBackups {
+ // what we waited for
+ return backups
+ }
+ assert.Less(t, len(backups), expectNumBackups)
+ select {
+ case <-ctx.Done():
+ assert.Failf(t, ctx.Err().Error(), "expected %d backups, got %d", expectNumBackups, len(backups))
+ return nil
+ case <-ticker.C:
+ }
+ }
+}
+
+func testReplicaIncrementalBackup(t *testing.T, replica *cluster.Vttablet, incrementalFromPos string, expectEmpty bool, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) {
+ numBackups := len(waitForNumBackups(t, -1))
+
+ output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("Backup", "--incremental-from-pos", incrementalFromPos, replica.Alias)
+ if expectError != "" {
+ require.Errorf(t, err, "expected: %v", expectError)
+ require.Contains(t, output, expectError)
+ return nil, ""
+ }
+ require.NoErrorf(t, err, "output: %v", output)
+
+ if expectEmpty {
+ require.Contains(t, output, mysqlctl.EmptyBackupMessage)
+ return nil, ""
+ }
+
+ backups := waitForNumBackups(t, numBackups+1)
+ require.NotEmptyf(t, backups, "output: %v", output)
+
+ verifyTabletBackupStats(t, replica.VttabletProcess.GetVars())
+ backupName = backups[len(backups)-1]
+
+ backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backupName
+ return readManifestFile(t, backupLocation), backupName
+}
+
+func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFromPos string, expectEmpty bool, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) {
+ replica := getReplica(t, replicaIndex)
+ return testReplicaIncrementalBackup(t, replica, incrementalFromPos, expectEmpty, expectError)
+}
+
+func TestReplicaFullRestore(t *testing.T, replicaIndex int, expectError string) {
+ replica := getReplica(t, replicaIndex)
+
+ output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias)
+ if expectError != "" {
+ require.Errorf(t, err, "expected: %v", expectError)
+ require.Contains(t, output, expectError)
+ return
+ }
+ require.NoErrorf(t, err, "output: %v", output)
+ verifyTabletRestoreStats(t, replica.VttabletProcess.GetVars())
+}
+
+func TestReplicaRestoreToPos(t *testing.T, replicaIndex int, restoreToPos mysql.Position, expectError string) {
+ replica := getReplica(t, replicaIndex)
+
+ require.False(t, restoreToPos.IsZero())
+ restoreToPosArg := mysql.EncodePosition(restoreToPos)
+ assert.Contains(t, restoreToPosArg, "MySQL56/")
+ if rand.Intn(2) == 0 {
+ // Verify that restore works whether or not the MySQL56/ prefix is present.
+ restoreToPosArg = strings.Replace(restoreToPosArg, "MySQL56/", "", 1)
+ assert.NotContains(t, restoreToPosArg, "MySQL56/")
+ }
+
+ output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-pos", restoreToPosArg, replica.Alias)
+ if expectError != "" {
+ require.Errorf(t, err, "expected: %v", expectError)
+ require.Contains(t, output, expectError)
+ return
+ }
+ require.NoErrorf(t, err, "output: %v", output)
+ verifyTabletRestoreStats(t, replica.VttabletProcess.GetVars())
+ checkTabletType(t, replica1.Alias, topodata.TabletType_DRAINED)
+}
+
+func TestReplicaRestoreToTimestamp(t *testing.T, restoreToTimestamp time.Time, expectError string) {
+ require.False(t, restoreToTimestamp.IsZero())
+ restoreToTimestampArg := mysqlctl.FormatRFC3339(restoreToTimestamp)
+ output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-timestamp", restoreToTimestampArg, replica1.Alias)
+ if expectError != "" {
+ require.Errorf(t, err, "expected: %v", expectError)
+ require.Contains(t, output, expectError)
+ return
+ }
+ require.NoErrorf(t, err, "output: %v", output)
+ verifyTabletRestoreStats(t, replica1.VttabletProcess.GetVars())
+ checkTabletType(t, replica1.Alias, topodata.TabletType_DRAINED)
+}
+
+func verifyTabletBackupStats(t *testing.T, vars map[string]any) {
+ switch currentSetupType {
+ // Currently only the builtin backup engine instruments bytes-processed counts.
+ case BuiltinBackup:
+ require.Contains(t, vars, "BackupBytes")
+ bb := vars["BackupBytes"].(map[string]any)
+ require.Contains(t, bb, "BackupEngine.Builtin.Compressor:Write")
+ require.Contains(t, bb, "BackupEngine.Builtin.Destination:Write")
+ require.Contains(t, bb, "BackupEngine.Builtin.Source:Read")
+ if backupstorage.BackupStorageImplementation == "file" {
+ require.Contains(t, bb, "BackupStorage.File.File:Write")
+ }
+ }
+
+ require.Contains(t, vars, "BackupCount")
+ bc := vars["BackupCount"].(map[string]any)
+ require.Contains(t, bc, "-.-.Backup")
+
+ switch currentSetupType {
+ // Currently only the builtin backup engine emits operation counts.
+ case BuiltinBackup:
+ require.Contains(t, bc, "BackupEngine.Builtin.Compressor:Close")
+ require.Contains(t, bc, "BackupEngine.Builtin.Destination:Close")
+ require.Contains(t, bc, "BackupEngine.Builtin.Destination:Open")
+ require.Contains(t, bc, "BackupEngine.Builtin.Source:Close")
+ require.Contains(t, bc, "BackupEngine.Builtin.Source:Open")
+ }
+
+ require.Contains(t, vars, "BackupDurationNanoseconds")
+ bd := vars["BackupDurationNanoseconds"]
+ require.Contains(t, bd, "-.-.Backup")
+
+ switch currentSetupType {
+ // Currently only the builtin backup engine emits timings.
+ case BuiltinBackup:
+ require.Contains(t, bd, "BackupEngine.Builtin.Compressor:Close")
+ require.Contains(t, bd, "BackupEngine.Builtin.Compressor:Write")
+ require.Contains(t, bd, "BackupEngine.Builtin.Destination:Close")
+ require.Contains(t, bd, "BackupEngine.Builtin.Destination:Open")
+ require.Contains(t, bd, "BackupEngine.Builtin.Destination:Write")
+ require.Contains(t, bd, "BackupEngine.Builtin.Source:Close")
+ require.Contains(t, bd, "BackupEngine.Builtin.Source:Open")
+ require.Contains(t, bd, "BackupEngine.Builtin.Source:Read")
+ }
+
+ if backupstorage.BackupStorageImplementation == "file" {
+ require.Contains(t, bd, "BackupStorage.File.File:Write")
+ }
+
+}
+
+func verifyRestorePositionAndTimeStats(t *testing.T, vars map[string]any) {
+ backupPosition := vars["RestorePosition"].(string)
+ backupTime := vars["RestoredBackupTime"].(string)
+ require.Contains(t, vars, "RestoredBackupTime")
+ require.Contains(t, vars, "RestorePosition")
+ require.NotEqual(t, "", backupPosition)
+ require.NotEqual(t, "", backupTime)
+ rp, err := mysql.DecodePosition(backupPosition)
+ require.NoError(t, err)
+ require.False(t, rp.IsZero())
+}
+
+func verifyTabletRestoreStats(t *testing.T, vars map[string]any) {
+ // Currently only the builtin backup engine instruments bytes-processed
+ // counts.
+
+ verifyRestorePositionAndTimeStats(t, vars)
+
+ switch currentSetupType {
+ case BuiltinBackup:
+ require.Contains(t, vars, "RestoreBytes")
+ bb := vars["RestoreBytes"].(map[string]any)
+ require.Contains(t, bb, "BackupEngine.Builtin.Decompressor:Read")
+ require.Contains(t, bb, "BackupEngine.Builtin.Destination:Write")
+ require.Contains(t, bb, "BackupEngine.Builtin.Source:Read")
+ require.Contains(t, bb, "BackupStorage.File.File:Read")
+ }
+
+ require.Contains(t, vars, "RestoreCount")
+ bc := vars["RestoreCount"].(map[string]any)
+ require.Contains(t, bc, "-.-.Restore")
+
+ switch currentSetupType {
+ // Currently only the builtin backup engine emits operation counts.
+ case BuiltinBackup:
+ require.Contains(t, bc, "BackupEngine.Builtin.Decompressor:Close")
+ require.Contains(t, bc, "BackupEngine.Builtin.Destination:Close")
+ require.Contains(t, bc, "BackupEngine.Builtin.Destination:Open")
+ require.Contains(t, bc, "BackupEngine.Builtin.Source:Close")
+ require.Contains(t, bc, "BackupEngine.Builtin.Source:Open")
+ }
+
+ require.Contains(t, vars, "RestoreDurationNanoseconds")
+ bd := vars["RestoreDurationNanoseconds"]
+ require.Contains(t, bd, "-.-.Restore")
+
+ switch currentSetupType {
+ // Currently only the builtin backup engine emits timings.
+ case BuiltinBackup:
+ require.Contains(t, bd, "BackupEngine.Builtin.Decompressor:Close")
+ require.Contains(t, bd, "BackupEngine.Builtin.Decompressor:Read")
+ require.Contains(t, bd, "BackupEngine.Builtin.Destination:Close")
+ require.Contains(t, bd, "BackupEngine.Builtin.Destination:Open")
+ require.Contains(t, bd, "BackupEngine.Builtin.Destination:Write")
+ require.Contains(t, bd, "BackupEngine.Builtin.Source:Close")
+ require.Contains(t, bd, "BackupEngine.Builtin.Source:Open")
+ require.Contains(t, bd, "BackupEngine.Builtin.Source:Read")
+ }
+
+ require.Contains(t, bd, "BackupStorage.File.File:Read")
+}
+
+func getDefaultCommonArgs() []string {
+ return []string{
+ "--vreplication_healthcheck_topology_refresh", "1s",
+ "--vreplication_healthcheck_retry_delay", "1s",
+ "--vreplication_retry_delay", "1s",
+ "--degraded_threshold", "5s",
+ "--lock_tables_timeout", "5s",
+ "--watch_replication_stream",
+ "--enable_replication_reporter",
+ "--serving_state_grace_period", "1s",
+ }
+}
+
+func setDefaultCommonArgs() { commonTabletArg = getDefaultCommonArgs() }
+
+// fetch the backup engine used on the last backup triggered by the end-to-end tests.
+func getBackupEngineOfLastBackup(t *testing.T) string {
+ lastBackup := getLastBackup(t)
+
+ manifest := readManifestFile(t, path.Join(localCluster.CurrentVTDATAROOT, "backups", keyspaceName, shardName, lastBackup))
+
+ return manifest.BackupMethod
+}
+
+func getLastBackup(t *testing.T) string {
+ backups, err := localCluster.ListBackups(shardKsName)
+ require.NoError(t, err)
+
+ return backups[len(backups)-1]
+}
+
+func TestBackupEngineSelector(t *testing.T) {
+ defer setDefaultCommonArgs()
+ defer cluster.PanicHandler(t)
+
+ // launch the cluster with xtrabackup as the default engine
+ code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
+ require.Nilf(t, err, "setup failed with status code %d", code)
+
+ defer TearDownCluster()
+
+ verifyInitialReplication(t)
+
+ t.Run("backup with backup-engine=builtin", func(t *testing.T) {
+ // first try to backup with an alternative engine (builtin)
+ err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+ require.NoError(t, err)
+ engineUsed := getBackupEngineOfLastBackup(t)
+ require.Equal(t, "builtin", engineUsed)
+ })
+
+ t.Run("backup with backup-engine=xtrabackup", func(t *testing.T) {
+ // then try to backup specifying the xtrabackup engine
+ err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+ require.NoError(t, err)
+ engineUsed := getBackupEngineOfLastBackup(t)
+ require.Equal(t, "xtrabackup", engineUsed)
+ })
+
+ t.Run("backup without specifying backup-engine", func(t *testing.T) {
+ // check that by default we still use the xtrabackup engine if not specified
+ err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias)
+ require.NoError(t, err)
+ engineUsed := getBackupEngineOfLastBackup(t)
+ require.Equal(t, "xtrabackup", engineUsed)
+ })
+}
+
+func TestRestoreAllowedBackupEngines(t *testing.T) {
+ defer setDefaultCommonArgs()
+ defer cluster.PanicHandler(t)
+
+ backupMsg := "right after xtrabackup backup"
+
+ cDetails := &CompressionDetails{CompressorEngineName: "pgzip"}
+
+ // launch the cluster with xtrabackup as the default engine
+ code, err := LaunchCluster(XtraBackup, "xbstream", 0, cDetails)
+ require.Nilf(t, err, "setup failed with status code %d", code)
+
+ defer TearDownCluster()
+
+ verifyInitialReplication(t)
+
+ t.Run("generate backups", func(t *testing.T) {
+ // lets take two backups, each using a different backup engine
+ err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+ require.NoError(t, err)
+
+ err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+ require.NoError(t, err)
+ })
+
+ // insert more data on the primary
+ _, err = primary.VttabletProcess.QueryTablet(fmt.Sprintf("insert into vt_insert_test (msg) values ('%s')", backupMsg), keyspaceName, true)
+ require.NoError(t, err)
+
+ t.Run("restore replica and verify data", func(t *testing.T) {
+ // now bring up another replica, letting it restore from backup.
+ restoreWaitForBackup(t, "replica", cDetails, true)
+ err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+ require.NoError(t, err)
+
+ // check the new replica has the data
+ cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+ result, err := replica2.VttabletProcess.QueryTablet(
+ fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+ require.NoError(t, err)
+ require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+ })
+
+ t.Run("test broken restore", func(t *testing.T) {
+ // now lets break the last backup in the shard
+ err = os.Remove(path.Join(localCluster.CurrentVTDATAROOT,
+ "backups", keyspaceName, shardName,
+ getLastBackup(t), "backup.xbstream.gz"))
+ require.NoError(t, err)
+
+ // and try to restore from it
+ err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", replica2.Alias)
+ require.Error(t, err) // this should fail
+ })
+
+ t.Run("test older working backup", func(t *testing.T) {
+ // now we retry but with the first backup
+ err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--allowed-backup-engines=builtin", replica2.Alias)
+ require.NoError(t, err) // this should succeed
+
+ // make sure we are replicating after the restore is done
+ err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+ require.NoError(t, err)
+ cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+
+ result, err := replica2.VttabletProcess.QueryTablet(
+ fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+ require.NoError(t, err)
+ require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+ })
+}
diff --git a/go/test/endtoend/backup/xtrabackup/select_engine_test.go b/go/test/endtoend/backup/xtrabackup/select_engine_test.go
new file mode 100644
index 00000000000..582cb58d874
--- /dev/null
+++ b/go/test/endtoend/backup/xtrabackup/select_engine_test.go
@@ -0,0 +1,17 @@
+package vtctlbackup
+
+import (
+ "testing"
+
+ backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup"
+)
+
+func TestBackupEngineSelector(t *testing.T) {
+ defer setDefaultCompressionFlag()
+ backup.TestBackupEngineSelector(t)
+}
+
+func TestRestoreAllowedBackupEngines(t *testing.T) {
+ defer setDefaultCompressionFlag()
+ backup.TestRestoreAllowedBackupEngines(t)
+}
diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go
index 36868656ccb..9090acf79d9 100644
--- a/go/test/endtoend/cellalias/cell_alias_test.go
+++ b/go/test/endtoend/cellalias/cell_alias_test.go
@@ -28,6 +28,7 @@ import (
"os"
"os/exec"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -334,12 +335,9 @@ func TestAddAliasWhileVtgateUp(t *testing.T) {
func waitTillAllTabletsAreHealthyInVtgate(t *testing.T, vtgateInstance cluster.VtgateProcess, shards ...string) {
for _, shard := range shards {
- err := vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shard), 1)
- require.Nil(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1)
- require.Nil(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1)
- require.Nil(t, err)
+ require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shard), 1, 30*time.Second))
+ require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1, 30*time.Second))
+ require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1, 30*time.Second))
}
}
diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go
index cb6fbb4fd40..c0907d24b26 100644
--- a/go/test/endtoend/cluster/cluster_process.go
+++ b/go/test/endtoend/cluster/cluster_process.go
@@ -248,9 +248,26 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) {
}
cluster.VtctlclientProcess = *VtctlClientProcessInstance("localhost", cluster.VtctldProcess.GrpcPort, cluster.TmpDirectory)
+ cluster.VtctldClientProcess = *VtctldClientProcessInstance("localhost", cluster.VtctldProcess.GrpcPort, cluster.TmpDirectory)
return
}
+// StartVTOrc starts a VTOrc instance
+func (cluster *LocalProcessCluster) StartVTOrc(keyspace string) error {
+ // Start vtorc
+ vtorcProcess := cluster.NewVTOrcProcess(VTOrcConfiguration{})
+ err := vtorcProcess.Setup()
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+ if keyspace != "" {
+ vtorcProcess.ExtraArgs = append(vtorcProcess.ExtraArgs, fmt.Sprintf(`--clusters_to_watch="%s"`, keyspace))
+ }
+ cluster.VTOrcProcesses = append(cluster.VTOrcProcesses, vtorcProcess)
+ return nil
+}
+
// StartUnshardedKeyspace starts unshared keyspace with shard name as "0"
func (cluster *LocalProcessCluster) StartUnshardedKeyspace(keyspace Keyspace, replicaCount int, rdonly bool) error {
return cluster.StartKeyspace(keyspace, []string{"0"}, replicaCount, rdonly)
@@ -757,14 +774,14 @@ func (cluster *LocalProcessCluster) WaitForTabletsToHealthyInVtgate() (err error
rdonlyTabletCount++
}
}
- if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1); err != nil {
+ if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1, 2*time.Minute); err != nil {
return err
}
if replicaTabletCount > 0 {
- err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), replicaTabletCount)
+ err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), replicaTabletCount, 2*time.Minute)
}
if rdonlyTabletCount > 0 {
- err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), rdonlyTabletCount)
+ err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), rdonlyTabletCount, 2*time.Minute)
}
if err != nil {
return err
@@ -782,7 +799,7 @@ func (cluster *LocalProcessCluster) ExecOnTablet(ctx context.Context, vttablet *
return nil, err
}
- tablet, err := cluster.vtctlclientGetTablet(vttablet)
+ tablet, err := cluster.VtctlclientGetTablet(vttablet)
if err != nil {
return nil, err
}
@@ -825,7 +842,7 @@ func (cluster *LocalProcessCluster) ExecOnVTGate(ctx context.Context, addr strin
// returns the responses. It returns an error if the stream ends with fewer than
// `count` responses.
func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vttablet *Vttablet, count int) (responses []*querypb.StreamHealthResponse, err error) {
- tablet, err := cluster.vtctlclientGetTablet(vttablet)
+ tablet, err := cluster.VtctlclientGetTablet(vttablet)
if err != nil {
return nil, err
}
@@ -857,7 +874,7 @@ func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vtta
return responses, nil
}
-func (cluster *LocalProcessCluster) vtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) {
+func (cluster *LocalProcessCluster) VtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) {
result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", "--", tablet.Alias)
if err != nil {
return nil, err
diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go
index 97ccaf80f86..3478d5dc806 100644
--- a/go/test/endtoend/cluster/mysqlctl_process.go
+++ b/go/test/endtoend/cluster/mysqlctl_process.go
@@ -65,7 +65,16 @@ func (mysqlctl *MysqlctlProcess) InitDb() (err error) {
// Start executes mysqlctl command to start mysql instance
func (mysqlctl *MysqlctlProcess) Start() (err error) {
- tmpProcess, err := mysqlctl.StartProcess()
+ tmpProcess, err := mysqlctl.startProcess(true)
+ if err != nil {
+ return err
+ }
+ return tmpProcess.Wait()
+}
+
+// StartProvideInit executes mysqlctl command to start the mysql instance; the init
+// parameter controls whether the "init --init_db_sql_file" step runs before "start"
+func (mysqlctl *MysqlctlProcess) StartProvideInit(init bool) (err error) {
+ tmpProcess, err := mysqlctl.startProcess(init)
if err != nil {
return err
}
@@ -74,6 +83,10 @@ func (mysqlctl *MysqlctlProcess) Start() (err error) {
// StartProcess starts the mysqlctl and returns the process reference
func (mysqlctl *MysqlctlProcess) StartProcess() (*exec.Cmd, error) {
+ return mysqlctl.startProcess(true)
+}
+
+func (mysqlctl *MysqlctlProcess) startProcess(init bool) (*exec.Cmd, error) {
tmpProcess := exec.Command(
mysqlctl.Binary,
"--log_dir", mysqlctl.LogDirectory,
@@ -120,8 +133,10 @@ ssl_key={{.Dir}}/server-001-key.pem
tmpProcess.Env = append(tmpProcess.Env, "VTDATAROOT="+os.Getenv("VTDATAROOT"))
}
- tmpProcess.Args = append(tmpProcess.Args, "init", "--",
- "--init_db_sql_file", mysqlctl.InitDBFile)
+ if init {
+ tmpProcess.Args = append(tmpProcess.Args, "init", "--",
+ "--init_db_sql_file", mysqlctl.InitDBFile)
+ }
}
tmpProcess.Args = append(tmpProcess.Args, "start")
log.Infof("Starting mysqlctl with command: %v", tmpProcess.Args)
diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go
index 074234fb97c..48408a8db34 100644
--- a/go/test/endtoend/cluster/vtctld_process.go
+++ b/go/test/endtoend/cluster/vtctld_process.go
@@ -58,8 +58,6 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error)
"--topo_global_server_address", vtctld.CommonArg.TopoGlobalAddress,
"--topo_global_root", vtctld.CommonArg.TopoGlobalRoot,
"--cell", cell,
- "--workflow_manager_init",
- "--workflow_manager_use_election",
"--service_map", vtctld.ServiceMap,
"--backup_storage_implementation", vtctld.BackupStorageImplementation,
"--file_backup_storage_root", vtctld.FileBackupStorageRoot,
@@ -67,6 +65,7 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error)
"--port", fmt.Sprintf("%d", vtctld.Port),
"--grpc_port", fmt.Sprintf("%d", vtctld.GrpcPort),
)
+
if *isCoverage {
vtctld.proc.Args = append(vtctld.proc.Args, "--test.coverprofile="+getCoveragePath("vtctld.out"))
}
diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go
index 54166f47fac..eb6bcd5881c 100644
--- a/go/test/endtoend/cluster/vtgate_process.go
+++ b/go/test/endtoend/cluster/vtgate_process.go
@@ -200,11 +200,11 @@ func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string, endPointsCou
// WaitForStatusOfTabletInShard function waits till status of a tablet in shard is 1
// endPointsCount: how many endpoints to wait for
-func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int) error {
+func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int, timeout time.Duration) error {
log.Infof("Waiting for healthy status of %d %s tablets in cell %s",
endPointsCount, name, vtgate.Cell)
- timeout := time.Now().Add(30 * time.Second)
- for time.Now().Before(timeout) {
+ deadline := time.Now().Add(timeout)
+ for time.Now().Before(deadline) {
if vtgate.GetStatusForTabletOfShard(name, endPointsCount) {
return nil
}
diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go
index 14f475fa48c..17a4aa5c54e 100644
--- a/go/test/endtoend/cluster/vtorc_process.go
+++ b/go/test/endtoend/cluster/vtorc_process.go
@@ -36,14 +36,15 @@ import (
// vtorc as a separate process for testing
type VTOrcProcess struct {
VtctlProcess
- Port int
- LogDir string
- ExtraArgs []string
- ConfigPath string
- Config VTOrcConfiguration
- WebPort int
- proc *exec.Cmd
- exit chan error
+ Port int
+ LogDir string
+ LogFileName string
+ ExtraArgs []string
+ ConfigPath string
+ Config VTOrcConfiguration
+ WebPort int
+ proc *exec.Cmd
+ exit chan error
}
type VTOrcConfiguration struct {
@@ -123,7 +124,10 @@ func (orc *VTOrcProcess) Setup() (err error) {
orc.proc.Args = append(orc.proc.Args, orc.ExtraArgs...)
orc.proc.Args = append(orc.proc.Args, "--alsologtostderr")
- errFile, _ := os.Create(path.Join(orc.LogDir, fmt.Sprintf("orc-stderr-%d.txt", timeNow)))
+ if orc.LogFileName == "" {
+ orc.LogFileName = fmt.Sprintf("orc-stderr-%d.txt", timeNow)
+ }
+ errFile, _ := os.Create(path.Join(orc.LogDir, orc.LogFileName))
orc.proc.Stderr = errFile
orc.proc.Env = append(orc.proc.Env, os.Environ()...)
diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go
index 46f55f579e3..60ca23a059e 100644
--- a/go/test/endtoend/cluster/vttablet_process.go
+++ b/go/test/endtoend/cluster/vttablet_process.go
@@ -117,9 +117,25 @@ func (vttablet *VttabletProcess) Setup() (err error) {
if vttablet.SupportsBackup {
vttablet.proc.Args = append(vttablet.proc.Args, "--restore_from_backup")
}
+ var majorVersion int
+ majorVersion, err = GetMajorVersion("vttablet")
+ if err != nil {
+ return err
+ }
+
if vttablet.EnableSemiSync {
- vttablet.proc.Args = append(vttablet.proc.Args, "--enable_semi_sync")
+ // enable_semi_sync is removed in v16 and shouldn't be set on any release v16+
+ if majorVersion <= 15 {
+ vttablet.proc.Args = append(vttablet.proc.Args, "--enable_semi_sync")
+ }
}
+
+ // disable-replication-manager is only available in v16+ and shouldn't be set on any older release
+ if majorVersion >= 16 {
+ disableReplicationFlag := "--disable-replication-manager"
+ vttablet.proc.Args = append(vttablet.proc.Args, disableReplicationFlag)
+ }
+
if vttablet.DbFlavor != "" {
vttablet.proc.Args = append(vttablet.proc.Args, fmt.Sprintf("--db_flavor=%s", vttablet.DbFlavor))
}
diff --git a/go/test/endtoend/clustertest/main_test.go b/go/test/endtoend/clustertest/main_test.go
index 0be66d56af8..35da40a3edb 100644
--- a/go/test/endtoend/clustertest/main_test.go
+++ b/go/test/endtoend/clustertest/main_test.go
@@ -107,9 +107,10 @@ func testURL(t *testing.T, url string, testCaseName string) {
// getStatusForUrl returns the status code for the URL
func getStatusForURL(url string) int {
- resp, _ := http.Get(url)
- if resp != nil {
- return resp.StatusCode
+ resp, err := http.Get(url)
+ if err != nil {
+ return 0
}
- return 0
+ defer resp.Body.Close()
+ return resp.StatusCode
}
diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go
index 36fcb51d97d..7533ce4a24e 100644
--- a/go/test/endtoend/clustertest/vtctld_test.go
+++ b/go/test/endtoend/clustertest/vtctld_test.go
@@ -62,13 +62,15 @@ func TestVtctldProcess(t *testing.T) {
func testTopoDataAPI(t *testing.T, url string) {
resp, err := http.Get(url)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, resp.StatusCode, 200)
resultMap := make(map[string]any)
- respByte, _ := io.ReadAll(resp.Body)
+ respByte, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
err = json.Unmarshal(respByte, &resultMap)
- require.Nil(t, err)
+ require.NoError(t, err)
errorValue := reflect.ValueOf(resultMap["Error"])
assert.Empty(t, errorValue.String())
@@ -83,7 +85,7 @@ func testTopoDataAPI(t *testing.T, url string) {
func testListAllTablets(t *testing.T) {
// first w/o any filters, aside from cell
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ListAllTablets", clusterInstance.Cell)
- require.Nil(t, err)
+ require.NoError(t, err)
tablets := getAllTablets()
@@ -104,7 +106,7 @@ func testListAllTablets(t *testing.T) {
"ListAllTablets", "--", "--keyspace", clusterInstance.Keyspaces[0].Name,
"--tablet_type", "primary",
clusterInstance.Cell)
- require.Nil(t, err)
+ require.NoError(t, err)
// We should only return a single primary tablet per shard in the first keyspace
tabletsFromCMD = strings.Split(result, "\n")
@@ -115,9 +117,10 @@ func testListAllTablets(t *testing.T) {
func testTabletStatus(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("http://%s:%d", clusterInstance.Hostname, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort))
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
respByte, err := io.ReadAll(resp.Body)
- require.Nil(t, err)
+ require.NoError(t, err)
result := string(respByte)
log.Infof("Tablet status response: %v", result)
assert.True(t, strings.Contains(result, `Alias: \n", status)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
cancel() // will cause runMultipleConnections() to terminate
wg.Wait()
+ }
+ t.Run("postponed revert", func(t *testing.T) {
+ testPostponedRevert(t, schema.OnlineDDLStatusRunning)
checkMigratedTable(t, tableName, alterHints[1])
testSelectTableMetrics(t)
})
+ t.Run("postponed revert view", func(t *testing.T) {
+ t.Run("CREATE VIEW again", func(t *testing.T) {
+ // The view does not exist
+ uuid := testOnlineDDLStatementForView(t, createViewStatement, ddlStrategy, "vtgate", "success_create")
+ uuids = append(uuids, uuid)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, true)
+ testRevertedUUID(t, uuid, "")
+ })
+ t.Run("ALTER VIEW, postpone completion", func(t *testing.T) {
+ // Technically this test better fits in `onlineddl_scheduler_test.go`, but since we've already laid the grounds here, this is where it landed.
+ // The view exists
+ checkTable(t, viewName, true)
+ uuid := testOnlineDDLStatementForView(t, alterViewStatement, ddlStrategy+" --postpone-completion", "vtgate", "success_create")
+ uuids = append(uuids, uuid)
+
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ // Issue a complete and wait for successful completion
+ onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true)
+ // This part may take a while, because we depend on vreplication polling
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 60*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
+ onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+ checkTable(t, viewName, true)
+ testRevertedUUID(t, uuid, "")
+ })
+ // now verify that the revert for ALTER VIEW respects `--postpone-completion`
+ testPostponedRevert(t, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
+ checkTable(t, viewName, true)
+ })
+
// INSTANT DDL
t.Run("INSTANT DDL: add column", func(t *testing.T) {
uuid := testOnlineDDLStatementForTable(t, "alter table stress_test add column i_instant int not null default 0", ddlStrategy+" --fast-over-revertible", "vtgate", "i_instant")
diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
index 05e1217a670..5275d455837 100644
--- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
+++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
@@ -41,8 +41,9 @@ var (
shards []cluster.Shard
vtParams mysql.ConnParams
- normalWaitTime = 20 * time.Second
- extendedWaitTime = 60 * time.Second
+ normalWaitTime = 20 * time.Second
+ extendedWaitTime = 60 * time.Second
+ ensureStateNotChangedTime = 5 * time.Second
hostname = "localhost"
keyspaceName = "ks"
@@ -79,6 +80,9 @@ var (
trivialAlterT2Statement = `
ALTER TABLE t2_test ENGINE=InnoDB;
`
+ instantAlterT1Statement = `
+ ALTER TABLE t1_test ADD COLUMN i0 INT NOT NULL DEFAULT 0;
+ `
dropT1Statement = `
DROP TABLE IF EXISTS t1_test
`
@@ -150,10 +154,8 @@ func TestMain(m *testing.M) {
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
- } else {
- os.Exit(exitcode)
}
-
+ os.Exit(exitcode)
}
func TestSchemaChange(t *testing.T) {
@@ -161,6 +163,9 @@ func TestSchemaChange(t *testing.T) {
shards = clusterInstance.Keyspaces[0].Shards
require.Equal(t, 1, len(shards))
+ mysqlVersion := onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet())
+ require.NotEmpty(t, mysqlVersion)
+
var t1uuid string
var t2uuid string
@@ -313,7 +318,7 @@ func TestSchemaChange(t *testing.T) {
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
// now that t1 is running, let's unblock t2. We expect it to remain queued.
onlineddl.CheckCompleteMigration(t, &vtParams, shards, t2uuid, true)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// t1 should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
// non-concurrent -- should be queued!
@@ -345,7 +350,7 @@ func TestSchemaChange(t *testing.T) {
t.Run("expect both running", func(t *testing.T) {
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, normalWaitTime, schema.OnlineDDLStatusRunning)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// both should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusRunning)
@@ -384,7 +389,7 @@ func TestSchemaChange(t *testing.T) {
// since all migrations are throttled, t1 migration is not ready_to_complete, hence
// t2 should not be running
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// both should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady)
@@ -393,7 +398,7 @@ func TestSchemaChange(t *testing.T) {
onlineddl.UnthrottleAllMigrations(t, &vtParams)
// t1 should now be ready_to_complete, hence t2 should start running
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, extendedWaitTime, schema.OnlineDDLStatusRunning)
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// both should be still running!
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning)
onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusRunning)
@@ -566,7 +571,7 @@ func TestSchemaChange(t *testing.T) {
})
drop1uuid := testOnlineDDLStatement(t, dropT1Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true) // skip wait
t.Run("t1drop blocked", func(t *testing.T) {
- time.Sleep(5 * time.Second)
+ time.Sleep(ensureStateNotChangedTime)
// drop1 migration should block. It can run concurrently to t1, but conflicts on table name
onlineddl.CheckMigrationStatus(t, &vtParams, shards, drop1uuid, schema.OnlineDDLStatusReady)
})
@@ -639,6 +644,9 @@ func TestSchemaChange(t *testing.T) {
}
})
})
+ t.Run("summary: validate completed_timestamp", func(t *testing.T) {
+ onlineddl.ValidateCompletedTimestamp(t, &vtParams)
+ })
}
// testOnlineDDLStatement runs an online DDL, ALTER statement
diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
index 2d7cd937cd5..118488c63b1 100644
--- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
+++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"io"
- "net/http"
"os"
"path"
"strings"
@@ -47,8 +46,8 @@ var (
onlineDDLThrottlerAppName = "online-ddl"
vstreamerThrottlerAppName = "vstreamer"
- normalMigrationWait = 20 * time.Second
- extendedMigrationWait = 20 * time.Second
+ normalMigrationWait = 45 * time.Second
+ extendedMigrationWait = 60 * time.Second
hostname = "localhost"
keyspaceName = "ks"
@@ -221,24 +220,25 @@ func TestMain(m *testing.M) {
}
// direct per-tablet throttler API instruction
-func throttleResponse(tablet *cluster.Vttablet, path string) (resp *http.Response, respBody string, err error) {
+func throttleResponse(tablet *cluster.Vttablet, path string) (respBody string, err error) {
apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.VttabletProcess.TabletHostname, tablet.HTTPPort, path)
- resp, err = httpClient.Get(apiURL)
+ resp, err := httpClient.Get(apiURL)
if err != nil {
- return resp, respBody, err
+ return "", err
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
respBody = string(b)
- return resp, respBody, err
+ return respBody, err
}
// direct per-tablet throttler API instruction
-func throttleApp(tablet *cluster.Vttablet, app string) (*http.Response, string, error) {
+func throttleApp(tablet *cluster.Vttablet, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app))
}
// direct per-tablet throttler API instruction
-func unthrottleApp(tablet *cluster.Vttablet, app string) (*http.Response, string, error) {
+func unthrottleApp(tablet *cluster.Vttablet, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app))
}
@@ -398,7 +398,7 @@ func TestSchemaChange(t *testing.T) {
// vstreamer source; but it's OK to be on the safe side and throttle on all tablets. Doesn't
// change the essence of this test.
for _, tablet := range shard.Vttablets {
- _, body, err := throttleApp(tablet, vstreamerThrottlerAppName)
+ body, err := throttleApp(tablet, vstreamerThrottlerAppName)
defer unthrottleApp(tablet, vstreamerThrottlerAppName)
assert.NoError(t, err)
@@ -498,12 +498,12 @@ func TestSchemaChange(t *testing.T) {
case 0:
// this is the shard where we run PRS
// Use per-tablet throttling API
- _, body, err = throttleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
+ body, err = throttleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
defer unthrottleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
case 1:
// no PRS on this shard
// Use per-tablet throttling API
- _, body, err = throttleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err = throttleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
defer unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
}
assert.NoError(t, err)
@@ -555,11 +555,11 @@ func TestSchemaChange(t *testing.T) {
case 0:
// this is the shard where we run PRS
// Use per-tablet throttling API
- _, body, err = unthrottleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
+ body, err = unthrottleApp(shards[i].Vttablets[currentPrimaryTabletIndex], onlineDDLThrottlerAppName)
case 1:
// no PRS on this shard
// Use per-tablet throttling API
- _, body, err = unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err = unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName)
}
assert.NoError(t, err)
assert.Contains(t, body, onlineDDLThrottlerAppName)
@@ -684,7 +684,7 @@ func TestSchemaChange(t *testing.T) {
// shard 0 will run normally, shard 1 will be throttled
defer unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
t.Run("throttle shard 1", func(t *testing.T) {
- _, body, err := throttleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err := throttleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, onlineDDLThrottlerAppName)
})
@@ -708,7 +708,7 @@ func TestSchemaChange(t *testing.T) {
onlineddl.CheckCancelAllMigrations(t, &vtParams, 1)
})
t.Run("unthrottle shard 1", func(t *testing.T) {
- _, body, err := unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
+ body, err := unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, onlineDDLThrottlerAppName)
})
@@ -756,6 +756,9 @@ func TestSchemaChange(t *testing.T) {
}
})
})
+ t.Run("summary: validate completed_timestamp", func(t *testing.T) {
+ onlineddl.ValidateCompletedTimestamp(t, &vtParams)
+ })
}
func insertRow(t *testing.T) {
diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
index 0531af319b4..57e7029f56b 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
@@ -140,6 +140,7 @@ const (
maxConcurrency = 20
singleConnectionSleepInterval = 2 * time.Millisecond
countIterations = 5
+ migrationWaitTimeout = 60 * time.Second
)
func resetOpOrder() {
@@ -344,7 +345,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str
assert.NoError(t, err)
if !strategySetting.Strategy.IsDirect() {
- status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 30*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+ status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
fmt.Printf("# Migration status (for debug purposes): <%s>\n", status)
}
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/different-pk-int-to-text/order_by b/go/test/endtoend/onlineddl/vrepl_suite/testdata/different-pk-int-to-text/order_by
new file mode 100644
index 00000000000..074d1eeb404
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/different-pk-int-to-text/order_by
@@ -0,0 +1 @@
+id
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns
new file mode 100644
index 00000000000..99f86097862
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns
@@ -0,0 +1 @@
+id, c1j
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter
new file mode 100644
index 00000000000..f2e64ff0894
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter
@@ -0,0 +1 @@
+change column c1 c1j json
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns
new file mode 100644
index 00000000000..b791aa0d27a
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns
@@ -0,0 +1 @@
+id, c1
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql
new file mode 100644
index 00000000000..5280498e9fd
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql
@@ -0,0 +1,22 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id int auto_increment,
+ c1 int not null,
+ primary key (id)
+) auto_increment=1;
+
+insert into onlineddl_test values (1, 11);
+insert into onlineddl_test values (2, 13);
+
+drop event if exists onlineddl_test;
+delimiter ;;
+create event onlineddl_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into onlineddl_test values (null, 17);
+end ;;
diff --git a/go/test/endtoend/onlineddl/vtgate_util.go b/go/test/endtoend/onlineddl/vtgate_util.go
index 86ea963465a..1b6def8ecf4 100644
--- a/go/test/endtoend/onlineddl/vtgate_util.go
+++ b/go/test/endtoend/onlineddl/vtgate_util.go
@@ -35,6 +35,14 @@ import (
"github.com/stretchr/testify/require"
)
+var (
+ testsStartupTime time.Time
+)
+
+func init() {
+ testsStartupTime = time.Now()
+}
+
// VtgateExecQuery runs a query on VTGate using given query params
func VtgateExecQuery(t *testing.T, vtParams *mysql.ConnParams, query string, expectError string) *sqltypes.Result {
t.Helper()
@@ -344,3 +352,68 @@ func WaitForThrottledTimestamp(t *testing.T, vtParams *mysql.ConnParams, uuid st
t.Error("timeout waiting for last_throttled_timestamp to have nonempty value")
return
}
+
+// ValidateSequentialMigrationIDs validates that the schema_migrations.id column, which is an AUTO_INCREMENT, does
+// not have gaps
+func ValidateSequentialMigrationIDs(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard) {
+ r := VtgateExecQuery(t, vtParams, "show vitess_migrations", "")
+ shardMin := map[string]uint64{}
+ shardMax := map[string]uint64{}
+ shardCount := map[string]uint64{}
+
+ for _, row := range r.Named().Rows {
+ id := row.AsUint64("id", 0)
+ require.NotZero(t, id)
+
+ shard := row.AsString("shard", "")
+ require.NotEmpty(t, shard)
+
+ if _, ok := shardMin[shard]; !ok {
+ shardMin[shard] = id
+ shardMax[shard] = id
+ }
+ if id < shardMin[shard] {
+ shardMin[shard] = id
+ }
+ if id > shardMax[shard] {
+ shardMax[shard] = id
+ }
+ shardCount[shard]++
+ }
+ require.NotEmpty(t, shards)
+ assert.Equal(t, len(shards), len(shardMin))
+ assert.Equal(t, len(shards), len(shardMax))
+ assert.Equal(t, len(shards), len(shardCount))
+ for shard, count := range shardCount {
+ assert.NotZero(t, count)
+ assert.Equalf(t, count, shardMax[shard]-shardMin[shard]+1, "mismatch: shared=%v, count=%v, min=%v, max=%v", shard, count, shardMin[shard], shardMax[shard])
+ }
+}
+
+// ValidateCompletedTimestamp ensures that any migration in `cancelled`, `completed`, `failed` statuses
+// has a non-nil and valid `completed_timestamp` value.
+func ValidateCompletedTimestamp(t *testing.T, vtParams *mysql.ConnParams) {
+ require.False(t, testsStartupTime.IsZero())
+ r := VtgateExecQuery(t, vtParams, "show vitess_migrations", "")
+
+ completedTimestampNumValidations := 0
+ for _, row := range r.Named().Rows {
+ migrationStatus := row.AsString("migration_status", "")
+ require.NotEmpty(t, migrationStatus)
+ switch migrationStatus {
+ case string(schema.OnlineDDLStatusComplete),
+ string(schema.OnlineDDLStatusFailed),
+ string(schema.OnlineDDLStatusCancelled):
+ {
+ assert.False(t, row["completed_timestamp"].IsNull())
+ // Also make sure the timestamp is "real", and that it is recent.
+ timestamp := row.AsString("completed_timestamp", "")
+ completedTime, err := time.Parse(sqltypes.TimestampFormat, timestamp)
+ assert.NoError(t, err)
+ assert.Greater(t, completedTime.Unix(), testsStartupTime.Unix())
+ completedTimestampNumValidations++
+ }
+ }
+ }
+ assert.NotZero(t, completedTimestampNumValidations)
+}
diff --git a/go/test/endtoend/onlineddl/vttablet_util.go b/go/test/endtoend/onlineddl/vttablet_util.go
index 4d4e88b8189..893d312d977 100644
--- a/go/test/endtoend/onlineddl/vttablet_util.go
+++ b/go/test/endtoend/onlineddl/vttablet_util.go
@@ -31,7 +31,7 @@ import (
)
// WaitForVReplicationStatus waits for a vreplication stream to be in one of given states, or timeout
-func WaitForVReplicationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, timeout time.Duration, expectStatuses ...string) (status string) {
+func WaitForVReplicationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, timeout time.Duration, expectStatuses ...string) (status string) { // nolint:revive
query, err := sqlparser.ParseAndBind("select workflow, state from _vt.vreplication where workflow=%a",
sqltypes.StringBindVariable(uuid),
diff --git a/go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go b/go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go
deleted file mode 100644
index ab0020d3734..00000000000
--- a/go/test/endtoend/recovery/pitrtls/shardedpitr_tls_test.go
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package pitrtls
-
-import (
- "context"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "os"
- "os/exec"
- "path"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/test/endtoend/cluster"
- "vitess.io/vitess/go/vt/log"
-)
-
-var (
- createTable = `create table product (id bigint(20) primary key, name char(10), created bigint(20));`
- insertTable = `insert into product (id, name, created) values(%d, '%s', unix_timestamp());`
- getCountID = `select count(*) from product`
-)
-
-var (
- clusterInstance *cluster.LocalProcessCluster
-
- primary *cluster.Vttablet
- replica *cluster.Vttablet
- shard0Primary *cluster.Vttablet
- shard0Replica *cluster.Vttablet
- shard1Primary *cluster.Vttablet
- shard1Replica *cluster.Vttablet
-
- cell = "zone1"
- hostname = "localhost"
- keyspaceName = "ks"
- restoreKS1Name = "restoreks1"
- restoreKS2Name = "restoreks2"
- restoreKS3Name = "restoreks3"
- shardName = "0"
- shard0Name = "-80"
- shard1Name = "80-"
- dbName = "vt_ks"
- mysqlUserName = "vt_dba"
- mysqlPassword = "password"
- vSchema = `{
- "sharded": true,
- "vindexes": {
- "hash_index": {
- "type": "hash"
- }
- },
- "tables": {
- "product": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash_index"
- }
- ]
- }
- }
- }`
- commonTabletArg = []string{
- "--vreplication_healthcheck_topology_refresh", "1s",
- "--vreplication_healthcheck_retry_delay", "1s",
- "--vreplication_retry_delay", "1s",
- "--degraded_threshold", "5s",
- "--lock_tables_timeout", "5s",
- "--watch_replication_stream",
- "--serving_state_grace_period", "1s"}
-)
-
-func removeTablets(t *testing.T, tablets []*cluster.Vttablet) {
- var mysqlProcs []*exec.Cmd
- for _, tablet := range tablets {
- proc, _ := tablet.MysqlctlProcess.StopProcess()
- mysqlProcs = append(mysqlProcs, proc)
- }
- for _, proc := range mysqlProcs {
- err := proc.Wait()
- require.NoError(t, err)
- }
- for _, tablet := range tablets {
- tablet.VttabletProcess.TearDown()
- }
-}
-
-func initializeCluster(t *testing.T) {
- clusterInstance = cluster.NewCluster(cell, hostname)
-
- // Start topo server
- err := clusterInstance.StartTopo()
- require.NoError(t, err)
-
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- }
- clusterInstance.Keyspaces = append(clusterInstance.Keyspaces, *keyspace)
-
- shard := &cluster.Shard{
- Name: shardName,
- }
- shard0 := &cluster.Shard{
- Name: shard0Name,
- }
- shard1 := &cluster.Shard{
- Name: shard1Name,
- }
-
- // Defining all the tablets
- primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- replica = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard0Primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard0Replica = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard1Primary = clusterInstance.NewVttabletInstance("replica", 0, "")
- shard1Replica = clusterInstance.NewVttabletInstance("replica", 0, "")
-
- shard.Vttablets = []*cluster.Vttablet{primary, replica}
- shard0.Vttablets = []*cluster.Vttablet{shard0Primary, shard0Replica}
- shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica}
-
- clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...)
- clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup")
-
- err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard, *shard0, *shard1})
- require.NoError(t, err)
- vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory)
- out, err := vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync")
- require.NoError(t, err, out)
- // Start MySql
- var mysqlCtlProcessList []*exec.Cmd
- for _, shard := range clusterInstance.Keyspaces[0].Shards {
- for _, tablet := range shard.Vttablets {
- tablet.MysqlctlProcess.SecureTransport = true
- proc, err := tablet.MysqlctlProcess.StartProcess()
- require.NoError(t, err)
- mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
- }
- }
-
- // Wait for mysql processes to start
- for _, proc := range mysqlCtlProcessList {
- err = proc.Wait()
- require.NoError(t, err)
- }
-
- queryCmds := []string{
- fmt.Sprintf("CREATE USER '%s'@'%%' IDENTIFIED BY '%s';", mysqlUserName, mysqlPassword),
- fmt.Sprintf("GRANT ALL ON *.* TO '%s'@'%%';", mysqlUserName),
- fmt.Sprintf("GRANT GRANT OPTION ON *.* TO '%s'@'%%';", mysqlUserName),
- fmt.Sprintf("create database %s;", "vt_ks"),
- "FLUSH PRIVILEGES;",
- }
-
- for _, tablet := range []*cluster.Vttablet{primary, replica, shard0Primary, shard0Replica, shard1Primary, shard1Replica} {
- for _, query := range queryCmds {
- _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false)
- require.NoError(t, err)
- }
-
- err = tablet.VttabletProcess.Setup()
- require.NoError(t, err)
- }
-
- err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID)
- require.NoError(t, err)
-
- // Start vtgate
- err = clusterInstance.StartVtgate()
- require.NoError(t, err)
-}
-
-func insertRow(t *testing.T, id int, productName string, isSlow bool) {
- ctx := context.Background()
- vtParams := mysql.ConnParams{
- Host: clusterInstance.Hostname,
- Port: clusterInstance.VtgateMySQLPort,
- }
- conn, err := mysql.Connect(ctx, &vtParams)
- require.NoError(t, err)
- defer conn.Close()
-
- insertSmt := fmt.Sprintf(insertTable, id, productName)
- _, err = conn.ExecuteFetch(insertSmt, 1000, true)
- require.NoError(t, err)
-
- if isSlow {
- time.Sleep(1 * time.Second)
- }
-}
-
-func createRestoreKeyspace(t *testing.T, timeToRecover, restoreKeyspaceName string) {
- output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--",
- "--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName,
- "--snapshot_time", timeToRecover, restoreKeyspaceName)
- log.Info(output)
- require.NoError(t, err)
-}
-
-// Test pitr (Point in time recovery).
-// -------------------------------------------
-// The following test will:
-// - create a shard with primary and replica
-// - run InitShardPrimary
-// - insert some data using vtgate (e.g. here we have inserted rows 1,2)
-// - verify the replication
-// - take backup of replica
-// - insert some data using vtgate (e.g. we inserted rows 3 4 5 6), while inserting row-4, note down the time (restoreTime1)
-// - perform a resharding to create 2 shards (-80, 80-), and delete the old shard
-// - insert some data using vtgate (e.g. we will insert 7 8 9 10) and verify we get required number of rows in -80, 80- shard
-// - take backup of both shards
-// - insert some more data using vtgate (e.g. we will insert 11 12 13 14 15), while inserting row-13, note down the time (restoreTime2)
-// - note down the current time (restoreTime3)
-
-// - Till now we did all the presetup for assertions
-
-// - asserting that restoring to restoreTime1 (going from 2 shards to 1 shard) is working, i.e. we should get 4 rows.
-// - asserting that while restoring if we give small timeout value, it will restore upto to the last available backup (asserting only -80 shard)
-// - asserting that restoring to restoreTime2 (going from 2 shards to 2 shards with past time) is working, it will assert for both shards
-// - asserting that restoring to restoreTime3 is working, we should get complete data after restoring, as we have in existing shards.
-func TestTLSPITRRecovery(t *testing.T) {
- defer cluster.PanicHandler(nil)
- initializeCluster(t)
- defer clusterInstance.Teardown()
-
- // Creating the table
- _, err := primary.VttabletProcess.QueryTablet(createTable, keyspaceName, true)
- require.NoError(t, err)
-
- insertRow(t, 1, "prd-1", false)
- insertRow(t, 2, "prd-2", false)
-
- cluster.VerifyRowsInTabletForTable(t, replica, keyspaceName, 2, "product")
-
- // backup the replica
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica.Alias)
- require.NoError(t, err)
-
- // check that the backup shows up in the listing
- output, err := clusterInstance.ListBackups("ks/0")
- require.NoError(t, err)
- assert.Equal(t, 1, len(output))
-
- // now insert some more data to simulate the changes after regular backup
- // every insert has some time lag/difference to simulate the time gap between rows
- // and when we recover to certain time, this time gap will be able to identify the exact eligible row
- var restoreTime1 string
- for counter := 3; counter <= 6; counter++ {
- if counter == 4 { // we want to recovery till this, so noting the time
- tm := time.Now().Add(1 * time.Second).UTC()
- restoreTime1 = tm.Format(time.RFC3339)
- }
- insertRow(t, counter, fmt.Sprintf("prd-%d", counter), true)
- }
-
- // creating restore keyspace with snapshot time as restoreTime1
- // Need to test this before resharding and we tear down the
- // original mysql replica, which we use as a binlog source
- createRestoreKeyspace(t, restoreTime1, restoreKS1Name)
-
- // Launching a recovery tablet which recovers data from the primary till the restoreTime1
- tlsTestTabletRecovery(t, replica, "2m", restoreKS1Name, "0", "INT64(4)")
-
- // starting resharding process
- tlsPerformResharding(t)
-
- for counter := 7; counter <= 10; counter++ {
- insertRow(t, counter, fmt.Sprintf("prd-%d", counter), false)
- }
-
- // wait till all the shards have required data
- cluster.VerifyRowsInTabletForTable(t, shard0Replica, keyspaceName, 6, "product")
- cluster.VerifyRowsInTabletForTable(t, shard1Replica, keyspaceName, 4, "product")
-
- // take the backup (to simulate the regular backup)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica.Alias)
- require.NoError(t, err)
- // take the backup (to simulate the regular backup)
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica.Alias)
- require.NoError(t, err)
-
- backups, err := clusterInstance.ListBackups(keyspaceName + "/-80")
- require.NoError(t, err)
- require.Equal(t, len(backups), 1)
-
- backups, err = clusterInstance.ListBackups(keyspaceName + "/80-")
- require.NoError(t, err)
- require.Equal(t, len(backups), 1)
-
- // now insert some more data to simulate the changes after regular backup
- // every insert has some time lag/difference to simulate the time gap between rows
- // and when we recover to certain time, this time gap will be able to identify the exact eligible row
- var restoreTime2 string
- for counter := 11; counter <= 15; counter++ {
- if counter == 13 { // we want to recovery till this, so noting the time
- tm := time.Now().Add(1 * time.Second).UTC()
- restoreTime2 = tm.Format(time.RFC3339)
- }
- insertRow(t, counter, fmt.Sprintf("prd-%d", counter), true)
- }
- restoreTime3 := time.Now().UTC().Format(time.RFC3339)
-
- // create restoreKeyspace with snapshot time as restoreTime2
- createRestoreKeyspace(t, restoreTime2, restoreKS2Name)
-
- // test the recovery with smaller binlog_lookup_timeout for shard0
- // since we have small lookup timeout, it will just get whatever available in the backup
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 1 | prd-1 | 1597219030 |
- // | 2 | prd-2 | 1597219030 |
- // | 3 | prd-3 | 1597219043 |
- // | 5 | prd-5 | 1597219045 |
- // | 9 | prd-9 | 1597219130 |
- // | 10 | prd-10 | 1597219130 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard0Replica, "1ms", restoreKS2Name, "-80", "INT64(6)")
-
- // test the recovery with valid binlog_lookup_timeout for shard0 and getting the data till the restoreTime2
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 1 | prd-1 | 1597219030 |
- // | 2 | prd-2 | 1597219030 |
- // | 3 | prd-3 | 1597219043 |
- // | 5 | prd-5 | 1597219045 |
- // | 9 | prd-9 | 1597219130 |
- // | 10 | prd-10 | 1597219130 |
- // | 13 | prd-13 | 1597219141 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard0Replica, "2m", restoreKS2Name, "-80", "INT64(7)")
-
- // test the recovery with valid binlog_lookup_timeout for shard1 and getting the data till the restoreTime2
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 4 | prd-4 | 1597219044 |
- // | 6 | prd-6 | 1597219046 |
- // | 7 | prd-7 | 1597219130 |
- // | 8 | prd-8 | 1597219130 |
- // | 11 | prd-11 | 1597219139 |
- // | 12 | prd-12 | 1597219140 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard1Replica, "2m", restoreKS2Name, "80-", "INT64(6)")
-
- // test the recovery with timetorecover > (timestamp of last binlog event in binlog server)
- createRestoreKeyspace(t, restoreTime3, restoreKS3Name)
-
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 1 | prd-1 | 1597219030 |
- // | 2 | prd-2 | 1597219030 |
- // | 3 | prd-3 | 1597219043 |
- // | 5 | prd-5 | 1597219045 |
- // | 9 | prd-9 | 1597219130 |
- // | 10 | prd-10 | 1597219130 |
- // | 13 | prd-13 | 1597219141 |
- // | 15 | prd-15 | 1597219142 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard0Replica, "2m", restoreKS3Name, "-80", "INT64(8)")
-
- // mysql> select * from product;
- // +----+--------+------------+
- // | id | name | created |
- // +----+--------+------------+
- // | 4 | prd-4 | 1597219044 |
- // | 6 | prd-6 | 1597219046 |
- // | 7 | prd-7 | 1597219130 |
- // | 8 | prd-8 | 1597219130 |
- // | 11 | prd-11 | 1597219139 |
- // | 12 | prd-12 | 1597219140 |
- // | 14 | prd-14 | 1597219142 |
- // +----+--------+------------+
- tlsTestTabletRecovery(t, shard1Replica, "2m", restoreKS3Name, "80-", "INT64(7)")
-}
-
-func tlsPerformResharding(t *testing.T) {
- err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
- require.NoError(t, err)
-
- err = clusterInstance.VtctlProcess.ExecuteCommand("InitShardPrimary", "--", "--force", "ks/-80", shard0Primary.Alias)
- require.NoError(t, err)
-
- err = clusterInstance.VtctlProcess.ExecuteCommand("InitShardPrimary", "--", "--force", "ks/80-", shard1Primary.Alias)
- require.NoError(t, err)
-
- // we need to create the schema, and the worker will do data copying
- for _, keyspaceShard := range []string{"ks/-80", "ks/80-"} {
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("CopySchemaShard", "ks/0", keyspaceShard)
- require.NoError(t, err)
- }
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "ks.reshardWorkflow", "0", "--", "-80,80-")
- require.NoError(t, err)
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "--", "--tablet_type=rdonly", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "--", "--tablet_type=replica", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- // then serve primary from the split shards
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchWrites", "ks.reshardWorkflow")
- require.NoError(t, err)
-
- // remove the original tablets in the original shard
- removeTablets(t, []*cluster.Vttablet{primary, replica})
-
- for _, tablet := range []*cluster.Vttablet{replica} {
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias)
- require.NoError(t, err)
- }
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", primary.Alias)
- require.NoError(t, err)
-
- // rebuild the serving graph, all mentions of the old shards should be gone
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks")
- require.NoError(t, err)
-
- // delete the original shard
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteShard", "ks/0")
- require.NoError(t, err)
-
- // Restart vtgate process
- err = clusterInstance.VtgateProcess.TearDown()
- require.NoError(t, err)
-
- err = clusterInstance.VtgateProcess.Setup()
- require.NoError(t, err)
-
- clusterInstance.WaitForTabletsToHealthyInVtgate()
-}
-
-func tlsTestTabletRecovery(t *testing.T, tabletForBinlogs *cluster.Vttablet, lookupTimeout, restoreKeyspaceName, shardName, expectedRows string) {
- recoveryTablet := clusterInstance.NewVttabletInstance("replica", 0, cell)
- tlsLaunchRecoveryTablet(t, recoveryTablet, tabletForBinlogs, lookupTimeout, restoreKeyspaceName, shardName)
-
- sqlRes, err := recoveryTablet.VttabletProcess.QueryTablet(getCountID, keyspaceName, true)
- require.NoError(t, err)
- assert.Equal(t, expectedRows, sqlRes.Rows[0][0].String())
-
- defer recoveryTablet.MysqlctlProcess.Stop()
- defer recoveryTablet.VttabletProcess.TearDown()
-}
-
-func tlsLaunchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, tabletForBinlogs *cluster.Vttablet, lookupTimeout, restoreKeyspaceName, shardName string) {
- tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory)
- tablet.MysqlctlProcess.SecureTransport = true
- err := tablet.MysqlctlProcess.Start()
- require.NoError(t, err)
-
- tablet.VttabletProcess = cluster.VttabletProcessInstance(
- tablet.HTTPPort,
- tablet.GrpcPort,
- tablet.TabletUID,
- clusterInstance.Cell,
- shardName,
- keyspaceName,
- clusterInstance.VtctldProcess.Port,
- tablet.Type,
- clusterInstance.TopoProcess.Port,
- clusterInstance.Hostname,
- clusterInstance.TmpDirectory,
- clusterInstance.VtTabletExtraArgs,
- clusterInstance.EnableSemiSync,
- clusterInstance.DefaultCharset)
- tablet.Alias = tablet.VttabletProcess.TabletPath
- tablet.VttabletProcess.SupportsBackup = true
- tablet.VttabletProcess.Keyspace = restoreKeyspaceName
-
- certDir := path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/ssl_%010d", tablet.MysqlctlProcess.TabletUID))
- tablet.VttabletProcess.ExtraArgs = []string{
- "--disable_active_reparents",
- "--enable_replication_reporter=false",
- "--init_db_name_override", dbName,
- "--init_tablet_type", "replica",
- "--init_keyspace", restoreKeyspaceName,
- "--init_shard", shardName,
- "--binlog_host", clusterInstance.Hostname,
- "--binlog_port", fmt.Sprintf("%d", tabletForBinlogs.MySQLPort),
- "--binlog_user", mysqlUserName,
- "--binlog_password", mysqlPassword,
- "--binlog_ssl_ca", certDir + "/ca-cert.pem",
- "--binlog_ssl_server_name", getCNFromCertPEM(certDir + "/server-001-cert.pem"),
- "--pitr_gtid_lookup_timeout", lookupTimeout,
- "--vreplication_healthcheck_topology_refresh", "1s",
- "--vreplication_healthcheck_retry_delay", "1s",
- "--vreplication_tablet_type", "replica",
- "--vreplication_retry_delay", "1s",
- "--degraded_threshold", "5s",
- "--lock_tables_timeout", "5s",
- "--watch_replication_stream",
- "--serving_state_grace_period", "1s",
- }
- tablet.VttabletProcess.ServingStatus = ""
-
- err = tablet.VttabletProcess.Setup()
- require.NoError(t, err)
-
- tablet.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 20*time.Second)
-}
-
-func getCNFromCertPEM(filename string) string {
- pemBytes, _ := os.ReadFile(filename)
- block, _ := pem.Decode(pemBytes)
- cert, _ := x509.ParseCertificate(block.Bytes)
- rdn := cert.Subject.ToRDNSequence()[0][0]
- t := rdn.Type
-
- // 2.5.4.3 is ASN OID for "CN"
- if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 && t[3] == 3 {
- return fmt.Sprintf("%s", rdn.Value)
- }
- // As good a fallback as any
- return "localhost"
-}
diff --git a/go/test/endtoend/recovery/recovery_util.go b/go/test/endtoend/recovery/recovery_util.go
index acc1d8ce616..cffae6a5005 100644
--- a/go/test/endtoend/recovery/recovery_util.go
+++ b/go/test/endtoend/recovery/recovery_util.go
@@ -51,18 +51,20 @@ func VerifyQueriesUsingVtgate(t *testing.T, session *vtgateconn.VTGateSession, q
}
// RestoreTablet performs a PITR restore.
-func RestoreTablet(t *testing.T, localCluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, restoreKSName string, shardName string, keyspaceName string, commonTabletArg []string) {
+func RestoreTablet(t *testing.T, localCluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, restoreKSName string, shardName string, keyspaceName string, commonTabletArg []string, restoreTime time.Time) {
tablet.ValidateTabletRestart(t)
replicaTabletArgs := commonTabletArg
_, err := localCluster.VtctlProcess.ExecuteCommandWithOutput("GetKeyspace", restoreKSName)
+ if restoreTime.IsZero() {
+ restoreTime = time.Now().UTC()
+ }
+
if err != nil {
- tm := time.Now().UTC()
- tm.Format(time.RFC3339)
_, err := localCluster.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--",
"--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName,
- "--snapshot_time", tm.Format(time.RFC3339), restoreKSName)
+ "--snapshot_time", restoreTime.Format(time.RFC3339), restoreKSName)
require.Nil(t, err)
}
diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
index 68c66a7bbc0..5975a3c1c3e 100644
--- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go
+++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
@@ -24,6 +24,7 @@ import (
"os/exec"
"path"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -172,25 +173,29 @@ SET GLOBAL old_alter_table = ON;
}
-// TestRecoveryImpl does following
-// - create a shard with primary and replica1 only
-// - run InitShardPrimary
-// - insert some data
-// - take a backup
-// - insert more data on the primary
-// - take another backup
-// - create a recovery keyspace after first backup
-// - bring up tablet_replica2 in the new keyspace
-// - check that new tablet does not have data created after backup1
-// - create second recovery keyspace after second backup
-// - bring up tablet_replica3 in second keyspace
-// - check that new tablet has data created after backup1 but not data created after backup2
-// - check that vtgate queries work correctly
+// 1. create a shard with primary and replica1 only
+// - run InitShardPrimary
+// - insert some data
+//
+// 2. take a backup
+// 3. create a recovery keyspace after first backup
+// - bring up tablet_replica2 in the new keyspace
+// - check that new tablet has data from backup1
+//
+// 4. insert more data on the primary
+//
+// 5. take another backup
+// 6. create a recovery keyspace after second backup
+// - bring up tablet_replica3 in the new keyspace
+// - check that new tablet has data from backup2
+//
+// 7. check that vtgate queries work correctly
func TestRecoveryImpl(t *testing.T) {
defer cluster.PanicHandler(t)
defer tabletsTeardown()
verifyInitialReplication(t)
+ // take first backup of value = test1
err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
@@ -198,10 +203,6 @@ func TestRecoveryImpl(t *testing.T) {
require.Equal(t, len(backups), 1)
assert.Contains(t, backups[0], replica1.Alias)
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
- assert.NoError(t, err)
- cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
-
err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
assert.NoError(t, err)
@@ -209,66 +210,81 @@ func TestRecoveryImpl(t *testing.T) {
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
- recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg)
+ // restore with latest backup
+ restoreTime := time.Now().UTC()
+ recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg, restoreTime)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell)
assert.NoError(t, err)
assert.Contains(t, output, keyspaceName)
assert.Contains(t, output, recoveryKS1)
- err = localCluster.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, keyspaceName)
- assert.NoError(t, err)
-
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
-
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 1)
cluster.VerifyLocalMetadata(t, replica2, recoveryKS1, shardName, cell)
+ // verify that restored replica has value = test1
+ qr, err := replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
+ assert.NoError(t, err)
+ assert.Equal(t, "test1", qr.Rows[0][0].ToString())
+
+ // insert new row on primary
+ _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
+ assert.NoError(t, err)
+ cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
+
// update the original row in primary
_, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx1' where id = 1", keyspaceName, true)
assert.NoError(t, err)
- //verify that primary has new value
- qr, err := primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
+ // verify that primary has new value
+ qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx1", qr.Rows[0][0].ToString())
- //verify that restored replica has old value
- qr, err = replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
- assert.NoError(t, err)
- assert.Equal(t, "test1", qr.Rows[0][0].ToString())
+ // check that replica1, used for the backup, has the new value
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
- err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
- assert.NoError(t, err)
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
- _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true)
- assert.NoError(t, err)
- cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3)
+ for {
+ qr, err = replica1.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
+ assert.NoError(t, err)
+ if qr.Rows[0][0].ToString() == "msgx1" {
+ break
+ }
- recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg)
+ select {
+ case <-ctx.Done():
+ // t.Fatal ends the test goroutine; a bare `break` here would only exit the
+ t.Fatal("timeout waiting for new value to be replicated on replica 1") // select, not the for loop, causing an infinite error-spamming loop on timeout
+ case <-ticker.C:
+ }
+ }
- output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
+ // take second backup of value = msgx1
+ err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
- assert.Contains(t, output, "vt_insert_test")
- cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 2)
+ // restore to first backup
+ recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg, restoreTime)
- // update the original row in primary
- _, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx2' where id = 1", keyspaceName, true)
+ output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
assert.NoError(t, err)
+ assert.Contains(t, output, "vt_insert_test")
- //verify that primary has new value
- qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
- assert.NoError(t, err)
- assert.Equal(t, "msgx2", qr.Rows[0][0].ToString())
+ // only one row from first backup
+ cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 1)
- //verify that restored replica has old value
+ // verify that restored replica has value = test1
qr, err = replica3.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
- assert.Equal(t, "msgx1", qr.Rows[0][0].ToString())
+ assert.Equal(t, "test1", qr.Rows[0][0].ToString())
vtgateInstance := localCluster.NewVtgateInstance()
vtgateInstance.TabletTypesToWait = "REPLICA"
@@ -276,15 +292,10 @@ func TestRecoveryImpl(t *testing.T) {
localCluster.VtgateGrpcPort = vtgateInstance.GrpcPort
assert.NoError(t, err)
defer vtgateInstance.TearDown()
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1)
- assert.NoError(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1)
- assert.NoError(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1)
- assert.NoError(t, err)
- err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1)
- assert.NoError(t, err)
-
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1, 30*time.Second))
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1, 30*time.Second))
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1, 30*time.Second))
+ assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1, 30*time.Second))
// Build vtgate grpc connection
grpcAddress := fmt.Sprintf("%s:%d", localCluster.Hostname, localCluster.VtgateGrpcPort)
vtgateConn, err := vtgateconn.Dial(context.Background(), grpcAddress)
@@ -292,27 +303,27 @@ func TestRecoveryImpl(t *testing.T) {
defer vtgateConn.Close()
session := vtgateConn.Session("@replica", nil)
- //check that vtgate doesn't route queries to new tablet
- recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(3)")
- recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx2")`)
+ // check that vtgate doesn't route queries to new tablet
+ recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
+ recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx1")`)
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS1), "INT64(1)")
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS1), `VARCHAR("test1")`)
- recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(2)")
- recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("msgx1")`)
+ recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(1)")
+ recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("test1")`)
// check that new keyspace is accessible with 'use ks'
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS1+"@replica")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS2+"@replica")
- recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
+ recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
// check that new tablet is accessible with use `ks:shard`
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS1+":0@replica`")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS2+":0@replica`")
- recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
+ recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
}
// verifyInitialReplication will create schema in primary, insert some data to primary and verify the same data in replica.
diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
index 3564ba3badb..4828731b444 100644
--- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go
+++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -387,10 +387,12 @@ func TestERSForInitialization(t *testing.T) {
shard.Vttablets = tablets
clusterInstance.VtTabletExtraArgs = []string{
"--lock_tables_timeout", "5s",
- "--enable_semi_sync",
"--init_populate_metadata",
"--track_schema_versions=true",
}
+ if clusterInstance.VtTabletMajorVersion <= 15 {
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--enable_semi_sync")
+ }
// Initialize Cluster
err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard})
diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
index b450fb44420..1de1adf0430 100644
--- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
+++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
@@ -17,155 +17,91 @@ limitations under the License.
package newfeaturetest
import (
- "strconv"
+ "context"
+ "fmt"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "google.golang.org/protobuf/encoding/protojson"
-
- "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
- replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
)
-// TestCrossCellDurability tests 2 things -
-// 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected
-// 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any external interference
-func TestCrossCellDurability(t *testing.T) {
+// TestRecoverWithMultipleVttabletFailures tests that ERS succeeds with the default values
+// even when there are multiple vttablet failures. In this test we use the semi_sync policy
+// to allow multiple failures to happen and still be recoverable.
+// The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the
+// default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag.
+func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
defer cluster.PanicHandler(t)
- clusterInstance := utils.SetupReparentCluster(t, "cross_cell")
+ clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
-
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
- // When tablets[0] is the primary, the only tablet in a different cell is tablets[3].
- // So the other two should have semi-sync turned off
- utils.CheckSemiSyncSetupCorrectly(t, tablets[0], "ON")
- utils.CheckSemiSyncSetupCorrectly(t, tablets[3], "ON")
- utils.CheckSemiSyncSetupCorrectly(t, tablets[1], "OFF")
- utils.CheckSemiSyncSetupCorrectly(t, tablets[2], "OFF")
+ // make tablets[1] a rdonly tablet.
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
+ require.NoError(t, err)
- // Run forced reparent operation, this should proceed unimpeded.
- out, err := utils.Prs(t, clusterInstance, tablets[3])
- require.NoError(t, err, out)
+ // Confirm that replication is still working as intended
+ utils.ConfirmReplication(t, tablets[0], tablets[1:])
- utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2]})
+ // Make the rdonly and primary tablets and databases unavailable.
+ utils.StopTablet(t, tablets[1], true)
+ utils.StopTablet(t, tablets[0], true)
- // All the tablets will have semi-sync setup since tablets[3] is in Cell2 and all
- // others are in Cell1, so all of them are eligible to send semi-sync ACKs
- for _, tablet := range tablets {
- utils.CheckSemiSyncSetupCorrectly(t, tablet, "ON")
- }
+ // We expect this to succeed since we only have 1 primary eligible tablet which is down
+ out, err := utils.Ers(clusterInstance, nil, "", "")
+ require.NoError(t, err, out)
- for i, supportsBackup := range []bool{false, true} {
- // Bring up a new replica tablet
- // In this new tablet, we do not disable active reparents, otherwise replication will not be started.
- newReplica := utils.StartNewVTTablet(t, clusterInstance, 300+i, supportsBackup)
- // Add the tablet to the list of tablets in this shard
- clusterInstance.Keyspaces[0].Shards[0].Vttablets = append(clusterInstance.Keyspaces[0].Shards[0].Vttablets, newReplica)
- // Check that we can replicate to it and semi-sync is setup correctly on it
- utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2], newReplica})
- utils.CheckSemiSyncSetupCorrectly(t, newReplica, "ON")
- }
+ newPrimary := utils.GetNewPrimary(t, clusterInstance)
+ utils.ConfirmReplication(t, newPrimary, []*cluster.Vttablet{tablets[2], tablets[3]})
}
-// TestFullStatus tests that the RPC FullStatus works as intended.
-func TestFullStatus(t *testing.T) {
+// TestTabletRestart tests that a running tablet can be restarted and everything is still fine
+func TestTabletRestart(t *testing.T) {
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
- utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
- // Check that full status gives the correct result for a primary tablet
- primaryStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablets[0].Alias)
- require.NoError(t, err)
- primaryStatus := &replicationdatapb.FullStatus{}
- err = protojson.Unmarshal([]byte(primaryStatusString), primaryStatus)
+ utils.StopTablet(t, tablets[1], false)
+ tablets[1].VttabletProcess.ServingStatus = "SERVING"
+ err := tablets[1].VttabletProcess.Setup()
require.NoError(t, err)
- assert.NotEmpty(t, primaryStatus.ServerUuid)
- assert.NotEmpty(t, primaryStatus.ServerId)
- // For a primary tablet there is no replication status
- assert.Nil(t, primaryStatus.ReplicationStatus)
- assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin")
- assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/")
- assert.False(t, primaryStatus.ReadOnly)
- assert.True(t, primaryStatus.SemiSyncPrimaryEnabled)
- assert.True(t, primaryStatus.SemiSyncReplicaEnabled)
- assert.True(t, primaryStatus.SemiSyncPrimaryStatus)
- assert.False(t, primaryStatus.SemiSyncReplicaStatus)
- assert.EqualValues(t, 3, primaryStatus.SemiSyncPrimaryClients)
- assert.EqualValues(t, 1000000000000000000, primaryStatus.SemiSyncPrimaryTimeout)
- assert.EqualValues(t, 1, primaryStatus.SemiSyncWaitForReplicaCount)
- assert.Equal(t, "ROW", primaryStatus.BinlogFormat)
- assert.Equal(t, "FULL", primaryStatus.BinlogRowImage)
- assert.Equal(t, "ON", primaryStatus.GtidMode)
- assert.True(t, primaryStatus.LogReplicaUpdates)
- assert.True(t, primaryStatus.LogBinEnabled)
- assert.Regexp(t, `[58]\.[07].*`, primaryStatus.Version)
- assert.NotEmpty(t, primaryStatus.VersionComment)
-
- // Check that full status gives the correct result for a replica tablet
- replicaStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablets[1].Alias)
- require.NoError(t, err)
- replicaStatus := &replicationdatapb.FullStatus{}
- err = protojson.Unmarshal([]byte(replicaStatusString), replicaStatus)
- require.NoError(t, err)
- assert.NotEmpty(t, replicaStatus.ServerUuid)
- assert.NotEmpty(t, replicaStatus.ServerId)
- assert.Contains(t, replicaStatus.ReplicationStatus.Position, "MySQL56/"+replicaStatus.ReplicationStatus.SourceUuid)
- assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState)
- assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState)
- assert.Equal(t, fileNameFromPosition(replicaStatus.ReplicationStatus.FilePosition), fileNameFromPosition(primaryStatus.PrimaryStatus.FilePosition))
- assert.LessOrEqual(t, rowNumberFromPosition(replicaStatus.ReplicationStatus.FilePosition), rowNumberFromPosition(primaryStatus.PrimaryStatus.FilePosition))
- assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition, primaryStatus.PrimaryStatus.FilePosition)
- assert.Contains(t, replicaStatus.ReplicationStatus.RelayLogFilePosition, "vt-0000000102-relay")
- assert.Equal(t, replicaStatus.ReplicationStatus.Position, primaryStatus.PrimaryStatus.Position)
- assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogPosition, primaryStatus.PrimaryStatus.Position)
- assert.Empty(t, replicaStatus.ReplicationStatus.LastIoError)
- assert.Empty(t, replicaStatus.ReplicationStatus.LastSqlError)
- assert.Equal(t, replicaStatus.ReplicationStatus.SourceUuid, primaryStatus.ServerUuid)
- assert.LessOrEqual(t, int(replicaStatus.ReplicationStatus.ReplicationLagSeconds), 1)
- assert.False(t, replicaStatus.ReplicationStatus.ReplicationLagUnknown)
- assert.EqualValues(t, 0, replicaStatus.ReplicationStatus.SqlDelay)
- assert.False(t, replicaStatus.ReplicationStatus.SslAllowed)
- assert.False(t, replicaStatus.ReplicationStatus.HasReplicationFilters)
- assert.False(t, replicaStatus.ReplicationStatus.UsingGtid)
- assert.True(t, replicaStatus.ReplicationStatus.AutoPosition)
- assert.Equal(t, replicaStatus.ReplicationStatus.SourceHost, utils.Hostname)
- assert.EqualValues(t, replicaStatus.ReplicationStatus.SourcePort, tablets[0].MySQLPort)
- assert.Equal(t, replicaStatus.ReplicationStatus.SourceUser, "vt_repl")
- assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin")
- assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/")
- assert.True(t, replicaStatus.ReadOnly)
- assert.False(t, replicaStatus.SemiSyncPrimaryEnabled)
- assert.True(t, replicaStatus.SemiSyncReplicaEnabled)
- assert.False(t, replicaStatus.SemiSyncPrimaryStatus)
- assert.True(t, replicaStatus.SemiSyncReplicaStatus)
- assert.EqualValues(t, 0, replicaStatus.SemiSyncPrimaryClients)
- assert.EqualValues(t, 1000000000000000000, replicaStatus.SemiSyncPrimaryTimeout)
- assert.EqualValues(t, 1, replicaStatus.SemiSyncWaitForReplicaCount)
- assert.Equal(t, "ROW", replicaStatus.BinlogFormat)
- assert.Equal(t, "FULL", replicaStatus.BinlogRowImage)
- assert.Equal(t, "ON", replicaStatus.GtidMode)
- assert.True(t, replicaStatus.LogReplicaUpdates)
- assert.True(t, replicaStatus.LogBinEnabled)
- assert.Regexp(t, `[58]\.[07].*`, replicaStatus.Version)
- assert.NotEmpty(t, replicaStatus.VersionComment)
}
-// fileNameFromPosition gets the file name from the position
-func fileNameFromPosition(pos string) string {
- return pos[0 : len(pos)-4]
-}
+// TestChangeTypeWithoutSemiSync ensures that ChangeTabletType works even when semi-sync plugins are not loaded.
+func TestChangeTypeWithoutSemiSync(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ clusterInstance := utils.SetupReparentCluster(t, "none")
+ defer utils.TeardownCluster(clusterInstance)
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ ctx := context.Background()
+
+ primary, replica := tablets[0], tablets[1]
+
+ // Unload semi sync plugins
+ for _, tablet := range tablets[0:4] {
+ qr := utils.RunSQL(ctx, t, "select @@global.super_read_only", tablet)
+ result := fmt.Sprintf("%v", qr.Rows[0][0].ToString())
+ if result == "1" {
+ utils.RunSQL(ctx, t, "set global super_read_only = 0", tablet)
+ }
-// rowNumberFromPosition gets the row number from the position
-func rowNumberFromPosition(pos string) int {
- rowNumStr := pos[len(pos)-4:]
- rowNum, _ := strconv.Atoi(rowNumStr)
- return rowNum
+ utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_slave;", tablet)
+ utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_master;", tablet)
+ }
+
+ utils.ValidateTopology(t, clusterInstance, true)
+ utils.CheckPrimaryTablet(t, clusterInstance, primary)
+
+ // Change replica's type to rdonly
+ err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly")
+ require.NoError(t, err)
+
+ // Change tablets type from rdonly back to replica
+ err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica")
+ require.NoError(t, err)
}
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
index 66db2908380..1e47c6d3c64 100644
--- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go
+++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
@@ -19,15 +19,21 @@ package plannedreparent
import (
"context"
"fmt"
+ "os"
+ "strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/encoding/protojson"
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
+ utilstest "vitess.io/vitess/go/test/endtoend/utils"
"vitess.io/vitess/go/vt/log"
+ replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
)
func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
@@ -183,13 +189,13 @@ func TestReparentFromOutsideWithNoPrimary(t *testing.T) {
}
func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessCluster, downPrimary bool) {
- //This test will start a primary and 3 replicas.
- //Then:
- //- one replica will be the new primary
- //- one replica will be reparented to that new primary
- //- one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
- //Args:
- //downPrimary: kills the old primary first
+ // This test will start a primary and 3 replicas.
+ // Then:
+ // - one replica will be the new primary
+ // - one replica will be reparented to that new primary
+ // - one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
+ // Args:
+ // downPrimary: kills the old primary first
ctx := context.Background()
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -202,7 +208,7 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus
demoteCommands := "SET GLOBAL read_only = ON; FLUSH TABLES WITH READ LOCK; UNLOCK TABLES"
utils.RunSQL(ctx, t, demoteCommands, tablets[0])
- //Get the position of the old primary and wait for the new one to catch up.
+ // Get the position of the old primary and wait for the new one to catch up.
err := utils.WaitForReplicationPosition(t, tablets[0], tablets[1])
require.NoError(t, err)
}
@@ -359,9 +365,16 @@ func TestChangeTypeSemiSync(t *testing.T) {
}
func TestReparentDoesntHangIfPrimaryFails(t *testing.T) {
+ utilstest.SkipIfBinaryIsAboveVersion(t, 15, "vttablet")
+
defer cluster.PanicHandler(t)
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
defer utils.TeardownCluster(clusterInstance)
+
+ // This test is no longer valid post v16
+ if clusterInstance.VtTabletMajorVersion >= 16 {
+ t.Skip("Skipping TestReparentDoesntHangIfPrimaryFails in CI environment for v16")
+ }
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
// Change the schema of the _vt.reparent_journal table, so that
@@ -369,10 +382,155 @@ func TestReparentDoesntHangIfPrimaryFails(t *testing.T) {
_, err := tablets[0].VttabletProcess.QueryTabletWithDB(
"ALTER TABLE reparent_journal DROP COLUMN replication_position", "_vt")
require.NoError(t, err)
-
// Perform a planned reparent operation, the primary will fail the
// insert. The replicas should then abort right away.
out, err := utils.Prs(t, clusterInstance, tablets[1])
require.Error(t, err)
assert.Contains(t, out, "primary failed to PopulateReparentJournal")
}
+
+// TestCrossCellDurability tests 2 things -
+// 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected
+// 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention
+func TestCrossCellDurability(t *testing.T) {
+ if os.Getenv("SKIPTESTCROSSCELLDURABILITY") == "1" {
+ t.Log("skipping due to SKIPTESTCROSSCELLDURABILITY=1")
+ return
+ }
+ defer cluster.PanicHandler(t)
+ clusterInstance := utils.SetupReparentCluster(t, "cross_cell")
+ defer utils.TeardownCluster(clusterInstance)
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+
+ // When tablets[0] is the primary, the only tablet in a different cell is tablets[3].
+ // So the other two should have semi-sync turned off
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[0], "ON")
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[3], "ON")
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[1], "OFF")
+ utils.CheckSemiSyncSetupCorrectly(t, tablets[2], "OFF")
+
+ // Run forced reparent operation, this should proceed unimpeded.
+ out, err := utils.Prs(t, clusterInstance, tablets[3])
+ require.NoError(t, err, out)
+
+ utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2]})
+
+ // All the tablets will have semi-sync setup since tablets[3] is in Cell2 and all
+ // others are in Cell1, so all of them are eligible to send semi-sync ACKs
+ for _, tablet := range tablets {
+ utils.CheckSemiSyncSetupCorrectly(t, tablet, "ON")
+ }
+
+ for i, supportsBackup := range []bool{false, true} {
+ // Bring up a new replica tablet
+ // In this new tablet, we do not disable active reparents, otherwise replication will not be started.
+ newReplica := utils.StartNewVTTablet(t, clusterInstance, 300+i, supportsBackup)
+ // Add the tablet to the list of tablets in this shard
+ clusterInstance.Keyspaces[0].Shards[0].Vttablets = append(clusterInstance.Keyspaces[0].Shards[0].Vttablets, newReplica)
+ // Check that we can replicate to it and semi-sync is setup correctly on it
+ utils.ConfirmReplication(t, tablets[3], []*cluster.Vttablet{tablets[0], tablets[1], tablets[2], newReplica})
+ utils.CheckSemiSyncSetupCorrectly(t, newReplica, "ON")
+ }
+}
+
+// TestFullStatus tests that the RPC FullStatus works as intended.
+func TestFullStatus(t *testing.T) {
+ defer cluster.PanicHandler(t)
+ clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
+ defer utils.TeardownCluster(clusterInstance)
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+ utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+
+ // Check that full status gives the correct result for a primary tablet
+ primaryStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablets[0].Alias)
+ require.NoError(t, err)
+ primaryStatus := &replicationdatapb.FullStatus{}
+ err = protojson.Unmarshal([]byte(primaryStatusString), primaryStatus)
+ if err != nil {
+ t.Logf("TestFullStatus got primaryStatusString: %s", string(primaryStatusString))
+ }
+ require.NoError(t, err)
+ assert.NotEmpty(t, primaryStatus.ServerUuid)
+ assert.NotEmpty(t, primaryStatus.ServerId)
+ // For a primary tablet there is no replication status
+ assert.Nil(t, primaryStatus.ReplicationStatus)
+ assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin")
+ assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/")
+ assert.False(t, primaryStatus.ReadOnly)
+ assert.True(t, primaryStatus.SemiSyncPrimaryEnabled)
+ assert.True(t, primaryStatus.SemiSyncReplicaEnabled)
+ assert.True(t, primaryStatus.SemiSyncPrimaryStatus)
+ assert.False(t, primaryStatus.SemiSyncReplicaStatus)
+ assert.EqualValues(t, 3, primaryStatus.SemiSyncPrimaryClients)
+ assert.EqualValues(t, 1000000000000000000, primaryStatus.SemiSyncPrimaryTimeout)
+ assert.EqualValues(t, 1, primaryStatus.SemiSyncWaitForReplicaCount)
+ assert.Equal(t, "ROW", primaryStatus.BinlogFormat)
+ assert.Equal(t, "FULL", primaryStatus.BinlogRowImage)
+ assert.Equal(t, "ON", primaryStatus.GtidMode)
+ assert.True(t, primaryStatus.LogReplicaUpdates)
+ assert.True(t, primaryStatus.LogBinEnabled)
+ assert.Regexp(t, `[58]\.[07].*`, primaryStatus.Version)
+ assert.NotEmpty(t, primaryStatus.VersionComment)
+
+ // Check that full status gives the correct result for a replica tablet
+ replicaStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablets[1].Alias)
+ require.NoError(t, err)
+ replicaStatus := &replicationdatapb.FullStatus{}
+ err = protojson.Unmarshal([]byte(replicaStatusString), replicaStatus)
+ require.NoError(t, err)
+ assert.NotEmpty(t, replicaStatus.ServerUuid)
+ assert.NotEmpty(t, replicaStatus.ServerId)
+ assert.Contains(t, replicaStatus.ReplicationStatus.Position, "MySQL56/"+replicaStatus.ReplicationStatus.SourceUuid)
+ assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState)
+ assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState)
+ assert.Equal(t, fileNameFromPosition(replicaStatus.ReplicationStatus.FilePosition), fileNameFromPosition(primaryStatus.PrimaryStatus.FilePosition))
+ assert.LessOrEqual(t, rowNumberFromPosition(replicaStatus.ReplicationStatus.FilePosition), rowNumberFromPosition(primaryStatus.PrimaryStatus.FilePosition))
+ assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition, primaryStatus.PrimaryStatus.FilePosition)
+ assert.Contains(t, replicaStatus.ReplicationStatus.RelayLogFilePosition, "vt-0000000102-relay")
+ assert.Equal(t, replicaStatus.ReplicationStatus.Position, primaryStatus.PrimaryStatus.Position)
+ assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogPosition, primaryStatus.PrimaryStatus.Position)
+ assert.Empty(t, replicaStatus.ReplicationStatus.LastIoError)
+ assert.Empty(t, replicaStatus.ReplicationStatus.LastSqlError)
+ assert.Equal(t, replicaStatus.ReplicationStatus.SourceUuid, primaryStatus.ServerUuid)
+ assert.LessOrEqual(t, int(replicaStatus.ReplicationStatus.ReplicationLagSeconds), 1)
+ assert.False(t, replicaStatus.ReplicationStatus.ReplicationLagUnknown)
+ assert.EqualValues(t, 0, replicaStatus.ReplicationStatus.SqlDelay)
+ assert.False(t, replicaStatus.ReplicationStatus.SslAllowed)
+ assert.False(t, replicaStatus.ReplicationStatus.HasReplicationFilters)
+ assert.False(t, replicaStatus.ReplicationStatus.UsingGtid)
+ assert.True(t, replicaStatus.ReplicationStatus.AutoPosition)
+ assert.Equal(t, replicaStatus.ReplicationStatus.SourceHost, utils.Hostname)
+ assert.EqualValues(t, replicaStatus.ReplicationStatus.SourcePort, tablets[0].MySQLPort)
+ assert.Equal(t, replicaStatus.ReplicationStatus.SourceUser, "vt_repl")
+ assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin")
+ assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/")
+ assert.True(t, replicaStatus.ReadOnly)
+ assert.False(t, replicaStatus.SemiSyncPrimaryEnabled)
+ assert.True(t, replicaStatus.SemiSyncReplicaEnabled)
+ assert.False(t, replicaStatus.SemiSyncPrimaryStatus)
+ assert.True(t, replicaStatus.SemiSyncReplicaStatus)
+ assert.EqualValues(t, 0, replicaStatus.SemiSyncPrimaryClients)
+ assert.EqualValues(t, 1000000000000000000, replicaStatus.SemiSyncPrimaryTimeout)
+ assert.EqualValues(t, 1, replicaStatus.SemiSyncWaitForReplicaCount)
+ assert.Equal(t, "ROW", replicaStatus.BinlogFormat)
+ assert.Equal(t, "FULL", replicaStatus.BinlogRowImage)
+ assert.Equal(t, "ON", replicaStatus.GtidMode)
+ assert.True(t, replicaStatus.LogReplicaUpdates)
+ assert.True(t, replicaStatus.LogBinEnabled)
+ assert.Regexp(t, `[58]\.[07].*`, replicaStatus.Version)
+ assert.NotEmpty(t, replicaStatus.VersionComment)
+}
+
+// fileNameFromPosition gets the file name from the position
+func fileNameFromPosition(pos string) string {
+ return pos[0 : len(pos)-4]
+}
+
+// rowNumberFromPosition gets the row number from the position
+func rowNumberFromPosition(pos string) int {
+ rowNumStr := pos[len(pos)-4:]
+ rowNum, _ := strconv.Atoi(rowNumStr)
+ return rowNum
+}
diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go
new file mode 100644
index 00000000000..82010a6a19c
--- /dev/null
+++ b/go/test/endtoend/reparent/prscomplex/main_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package misc
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ rutils "vitess.io/vitess/go/test/endtoend/reparent/utils"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "test"
+
+ //go:embed schema.sql
+ schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: schemaSQL,
+ }
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
+ "--queryserver-config-query-timeout=9000",
+ "--queryserver-config-pool-size=3",
+ "--queryserver-config-stream-pool-size=3",
+ "--queryserver-config-transaction-cap=2",
+ "--queryserver-config-transaction-timeout=20",
+ "--shutdown_grace_period=3",
+ "--queryserver-config-schema-change-signal=false")
+ err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false)
+ if err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--planner-version=gen4",
+ "--mysql_default_workload=olap",
+ "--schema_change_signal=false")
+ err = clusterInstance.StartVtgate()
+ if err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+/*
+TestAcquireSameConnID tests that a query started on a connection gets reconnected with a new connection.
+Another query acquires the old connection ID and does not override the query list maintained by the vttablet process.
+PRS should not fail as the query list is maintained appropriately.
+*/
+func TestAcquireSameConnID(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err != nil {
+ require.Equal(t, "Fail in goroutine after TestAcquireSameConnID has completed", err)
+ }
+ }()
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // start a reserved connection
+ utils.Exec(t, conn, "set sql_mode=''")
+ _ = utils.Exec(t, conn, "select connection_id()")
+
+ // restart the mysql to trigger reconnect on next query.
+ primTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
+ err = primTablet.MysqlctlProcess.Stop()
+ require.NoError(t, err)
+ err = primTablet.MysqlctlProcess.StartProvideInit(false)
+ require.NoError(t, err)
+
+ go func() {
+ // this will trigger reconnect with a new connection id, which will be lower than the origin connection id.
+ _, _ = utils.ExecAllowError(t, conn, "select connection_id(), sleep(4000)")
+ }()
+ time.Sleep(5 * time.Second)
+
+ totalErrCount := 0
+ // run through 100 times to acquire new connection, this might override the original connection id.
+ var conn2 *mysql.Conn
+ for i := 0; i < 100; i++ {
+ conn2, err = mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+
+ utils.Exec(t, conn2, "set sql_mode=''")
+ // ReserveExecute
+ _, err = utils.ExecAllowError(t, conn2, "select connection_id()")
+ if err != nil {
+ totalErrCount++
+ }
+ // Execute
+ _, err = utils.ExecAllowError(t, conn2, "select connection_id()")
+ if err != nil {
+ totalErrCount++
+ }
+ }
+
+	// We run the above loop 100 times so we execute 200 queries, of which only some should fail due to the MySQL restart.
+ assert.Less(t, totalErrCount, 10, "MySQL restart can cause some errors, but not too many.")
+
+ // prs should happen without any error.
+ text, err := rutils.Prs(t, clusterInstance, clusterInstance.Keyspaces[0].Shards[0].Replica())
+ require.NoError(t, err, text)
+}
diff --git a/go/test/endtoend/reparent/prscomplex/schema.sql b/go/test/endtoend/reparent/prscomplex/schema.sql
new file mode 100644
index 00000000000..3e78cab09d6
--- /dev/null
+++ b/go/test/endtoend/reparent/prscomplex/schema.sql
@@ -0,0 +1,5 @@
+create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go
new file mode 100644
index 00000000000..a9f4312caea
--- /dev/null
+++ b/go/test/endtoend/reparent/prssettingspool/main_test.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package misc
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ rutils "vitess.io/vitess/go/test/endtoend/reparent/utils"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "test"
+
+ //go:embed schema.sql
+ schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: schemaSQL,
+ }
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
+ "--queryserver-enable-settings-pool")
+ err = clusterInstance.StartUnshardedKeyspace(*keyspace, 2, false)
+ if err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--planner-version", "gen4")
+ err = clusterInstance.StartVtgate()
+ if err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+func TestSettingsPoolWithTXAndPRS(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // set a system settings that will trigger reserved connection usage.
+ utils.Exec(t, conn, "set default_week_format = 5")
+
+ // have transaction on the session
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "select id1, id2 from t1")
+ utils.Exec(t, conn, "commit")
+
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ // prs should happen without any error.
+ text, err := rutils.Prs(t, clusterInstance, tablets[1])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute)
+
+ defer func() {
+ // reset state
+ text, err = rutils.Prs(t, clusterInstance, tablets[0])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute)
+ }()
+
+ // no error should occur and it should go to the right tablet.
+ utils.Exec(t, conn, "select id1, id2 from t1")
+}
+
+func TestSettingsPoolWithoutTXAndPRS(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ // set a system settings that will trigger reserved connection usage.
+ utils.Exec(t, conn, "set default_week_format = 5")
+
+ // execute non-tx query
+ utils.Exec(t, conn, "select id1, id2 from t1")
+
+ tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+ // prs should happen without any error.
+ text, err := rutils.Prs(t, clusterInstance, tablets[1])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute)
+ defer func() {
+ // reset state
+ text, err = rutils.Prs(t, clusterInstance, tablets[0])
+ require.NoError(t, err, text)
+ rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute)
+ }()
+
+ // no error should occur and it should go to the right tablet.
+ utils.Exec(t, conn, "select id1, id2 from t1")
+
+}
diff --git a/go/test/endtoend/reparent/prssettingspool/schema.sql b/go/test/endtoend/reparent/prssettingspool/schema.sql
new file mode 100644
index 00000000000..3e78cab09d6
--- /dev/null
+++ b/go/test/endtoend/reparent/prssettingspool/schema.sql
@@ -0,0 +1,5 @@
+create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go
index a3359d172d5..ca1cec15bd1 100644
--- a/go/test/endtoend/reparent/utils/utils.go
+++ b/go/test/endtoend/reparent/utils/utils.go
@@ -31,6 +31,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
@@ -72,9 +75,25 @@ func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalPro
return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, "semi_sync")
}
-// TeardownCluster is used to teardown the reparent cluster
+// TeardownCluster is used to teardown the reparent cluster. When
+// run in a CI environment -- which is considered true when the
+// "CI" env variable is set to "true" -- the teardown also removes
+// the VTDATAROOT directory that was used for the test/cluster.
func TeardownCluster(clusterInstance *cluster.LocalProcessCluster) {
+ usedRoot := clusterInstance.CurrentVTDATAROOT
clusterInstance.Teardown()
+ // This is always set to "true" on GitHub Actions runners:
+ // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
+ ci, ok := os.LookupEnv("CI")
+ if !ok || strings.ToLower(ci) != "true" {
+ // Leave the directory in place to support local debugging.
+ return
+ }
+ // We're running in the CI, so free up disk space for any
+ // subsequent tests.
+ if err := os.RemoveAll(usedRoot); err != nil {
+ log.Errorf("Failed to remove previously used VTDATAROOT (%s): %v", usedRoot, err)
+ }
}
func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int, durability string) *cluster.LocalProcessCluster {
@@ -82,7 +101,8 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s
clusterInstance := cluster.NewCluster(cells[0], Hostname)
keyspace := &cluster.Keyspace{Name: KeyspaceName}
- if durability == "semi_sync" {
+ // enable_semi_sync is removed in v16 and shouldn't be set on any release v16+
+ if durability == "semi_sync" && clusterInstance.VtTabletMajorVersion <= 15 {
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--enable_semi_sync")
}
@@ -113,6 +133,10 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s
shard := &cluster.Shard{Name: shardName}
shard.Vttablets = tablets
+ disableReplicationFlag := "--disable_active_reparents"
+ if clusterInstance.VtTabletMajorVersion >= 15 {
+ disableReplicationFlag = "--disable-replication-manager"
+ }
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
"--lock_tables_timeout", "5s",
"--init_populate_metadata",
@@ -129,7 +153,7 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s
// the replication manager to silently fix the replication in case ERS or PRS mess up. All the
// tests in this test suite should work irrespective of this flag. Each run of ERS, PRS should be
// setting up the replication correctly.
- "--disable_active_reparents")
+ disableReplicationFlag)
// Initialize Cluster
err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard})
@@ -700,3 +724,24 @@ func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.V
require.Equal(t, "No", res.Rows[0][11].ToString())
}
}
+
+func WaitForTabletToBeServing(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, timeout time.Duration) {
+ vTablet, err := clusterInstance.VtctlclientGetTablet(tablet)
+ require.NoError(t, err)
+
+ tConn, err := tabletconn.GetDialer()(vTablet, false)
+ require.NoError(t, err)
+
+ newCtx, cancel := context.WithTimeout(context.Background(), timeout)
+ err = tConn.StreamHealth(newCtx, func(shr *querypb.StreamHealthResponse) error {
+ if shr.Serving {
+ cancel()
+ }
+ return nil
+ })
+
+ // the error should only be because we cancelled the context when the tablet became serving again.
+ if err != nil && !strings.Contains(err.Error(), "context canceled") {
+ t.Fatal(err.Error())
+ }
+}
diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
index 7383c0b7818..843c6800622 100644
--- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
+++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
@@ -288,7 +288,7 @@ func (bt *BufferingTest) Test(t *testing.T) {
// Healthcheck interval on tablet is set to 1s, so sleep for 2s
time.Sleep(2 * time.Second)
conn, err := mysql.Connect(context.Background(), &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
// Insert two rows for the later threads (critical read, update).
@@ -350,11 +350,14 @@ func (bt *BufferingTest) Test(t *testing.T) {
//At least one thread should have been buffered.
//This may fail if a failover is too fast. Add retries then.
resp, err := http.Get(clusterInstance.VtgateProcess.VerifyURL)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
require.Equal(t, 200, resp.StatusCode)
var metadata VTGateBufferingStats
- respByte, _ := io.ReadAll(resp.Body)
+ respByte, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
err = json.Unmarshal(respByte, &metadata)
require.NoError(t, err)
diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go
index 239fb0f9bb9..a3876b259f3 100644
--- a/go/test/endtoend/tabletgateway/vtgate_test.go
+++ b/go/test/endtoend/tabletgateway/vtgate_test.go
@@ -45,7 +45,7 @@ func TestVtgateHealthCheck(t *testing.T) {
verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
qr := utils.Exec(t, conn, "show vitess_tablets")
@@ -59,7 +59,7 @@ func TestVtgateReplicationStatusCheck(t *testing.T) {
verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
// Only returns rows for REPLICA and RDONLY tablets -- so should be 2 of them
@@ -72,10 +72,12 @@ func TestVtgateReplicationStatusCheck(t *testing.T) {
func verifyVtgateVariables(t *testing.T, url string) {
resp, err := http.Get(url)
require.NoError(t, err)
+ defer resp.Body.Close()
require.Equal(t, 200, resp.StatusCode, "Vtgate api url response not found")
resultMap := make(map[string]any)
- respByte, _ := io.ReadAll(resp.Body)
+ respByte, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
err = json.Unmarshal(respByte, &resultMap)
require.NoError(t, err)
assert.Contains(t, resultMap, "VtgateVSchemaCounts", "Vschema count should be present in variables")
@@ -203,7 +205,7 @@ func TestReplicaTransactions(t *testing.T) {
// been restarted and the session lost
replicaTablet.VttabletProcess.ServingStatus = "SERVING"
err = replicaTablet.VttabletProcess.Setup()
- require.Nil(t, err)
+ require.NoError(t, err)
serving := replicaTablet.VttabletProcess.WaitForStatus("SERVING", 60*time.Second)
assert.Equal(t, serving, true, "Tablet did not become ready within a reasonable time")
utils.AssertContainsError(t, readConn, fetchAllCustomers, "not found")
diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go
index 3fcb414e25c..a87990e828b 100644
--- a/go/test/endtoend/tabletmanager/primary/tablet_test.go
+++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go
@@ -124,16 +124,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) {
// Make replica tablet as primary
err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
// Run health check on both, make sure they are both healthy.
// Also make sure the types are correct.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, primaryTablet.HTTPPort, false)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, replicaTablet.HTTPPort, false)
checkTabletType(t, primaryTablet.Alias, "REPLICA")
@@ -141,16 +141,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) {
// Come back to the original tablet.
err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
// Run health check on both, make sure they are both healthy.
// Also make sure the types are correct.
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, primaryTablet.HTTPPort, false)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias)
- require.Nil(t, err)
+ require.NoError(t, err)
checkHealth(t, replicaTablet.HTTPPort, false)
checkTabletType(t, primaryTablet.Alias, "PRIMARY")
@@ -165,14 +165,14 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) {
// Make replica as primary
err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING")
- require.Nil(t, err)
+ require.NoError(t, err)
// Capture the current TER.
shrs, err := clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1)
- require.Nil(t, err)
+ require.NoError(t, err)
streamHealthRes1 := shrs[0]
actualType := streamHealthRes1.GetTarget().GetTabletType()
@@ -188,15 +188,15 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) {
// kill the newly promoted primary tablet
err = replicaTablet.VttabletProcess.TearDown()
- require.Nil(t, err)
+ require.NoError(t, err)
// Start Vttablet
err = clusterInstance.StartVttablet(&replicaTablet, "SERVING", false, cell, keyspaceName, hostname, shardName)
- require.Nil(t, err)
+ require.NoError(t, err)
// Make sure that the TER did not change
shrs, err = clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1)
- require.Nil(t, err)
+ require.NoError(t, err)
streamHealthRes2 := shrs[0]
@@ -215,16 +215,17 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) {
// Reset primary
err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID)
- require.Nil(t, err)
+ require.NoError(t, err)
err = primaryTablet.VttabletProcess.WaitForTabletStatus("SERVING")
- require.Nil(t, err)
+ require.NoError(t, err)
}
func checkHealth(t *testing.T, port int, shouldError bool) {
url := fmt.Sprintf("http://localhost:%d/healthz", port)
resp, err := http.Get(url)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
if shouldError {
assert.True(t, resp.StatusCode > 400)
} else {
@@ -234,11 +235,11 @@ func checkHealth(t *testing.T, port int, shouldError bool) {
func checkTabletType(t *testing.T, tabletAlias string, typeWant string) {
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias)
- require.Nil(t, err)
+ require.NoError(t, err)
var tablet topodatapb.Tablet
err = json2.Unmarshal([]byte(result), &tablet)
- require.Nil(t, err)
+ require.NoError(t, err)
actualType := tablet.GetType()
got := fmt.Sprintf("%d", actualType)
diff --git a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go
index 3284ab65d49..d9a24b3b444 100644
--- a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go
+++ b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go
@@ -159,8 +159,7 @@ func waitForSourcePort(ctx context.Context, t *testing.T, tablet cluster.Vttable
for time.Now().Before(timeout) {
// Check that initially replication is setup correctly on the replica tablet
replicaStatus, err := tmcGetReplicationStatus(ctx, tablet.GrpcPort)
- require.NoError(t, err)
- if replicaStatus.SourcePort == expectedPort {
+ if err == nil && replicaStatus.SourcePort == expectedPort {
return nil
}
time.Sleep(300 * time.Millisecond)
diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go
index d3d23e0075f..19359406607 100644
--- a/go/test/endtoend/tabletmanager/tablet_health_test.go
+++ b/go/test/endtoend/tabletmanager/tablet_health_test.go
@@ -202,6 +202,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) {
url := fmt.Sprintf("http://localhost:%d/healthz", port)
resp, err := http.Get(url)
require.NoError(t, err)
+ defer resp.Body.Close()
if shouldError {
assert.True(t, resp.StatusCode > 400)
} else {
diff --git a/go/test/endtoend/tabletmanager/throttler/throttler_test.go b/go/test/endtoend/tabletmanager/throttler/throttler_test.go
index 38f886c034e..dfd03299063 100644
--- a/go/test/endtoend/tabletmanager/throttler/throttler_test.go
+++ b/go/test/endtoend/tabletmanager/throttler/throttler_test.go
@@ -29,6 +29,7 @@ import (
"vitess.io/vitess/go/test/endtoend/cluster"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
var (
@@ -162,7 +163,8 @@ func TestThrottlerBeforeMetricsCollected(t *testing.T) {
// {"StatusCode":404,"Value":0,"Threshold":0,"Message":"No such metric"}
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
}
}
@@ -171,8 +173,9 @@ func warmUpHeartbeat(t *testing.T) (respStatus int) {
// because we run with -heartbeat_on_demand_duration=5s, the heartbeat is "cold" right now.
// Let's warm it up.
resp, err := throttleCheck(primaryTablet)
+ require.NoError(t, err)
+ defer resp.Body.Close()
time.Sleep(time.Second)
- assert.NoError(t, err)
return resp.StatusCode
}
@@ -188,23 +191,27 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) {
time.Sleep(time.Second)
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, body, err := throttledApps(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Contains(t, body, "always-throttled-app")
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(replicaTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
}
@@ -221,18 +228,21 @@ func TestLag(t *testing.T) {
// {"StatusCode":429,"Value":4.864921,"Threshold":1,"Message":"Threshold exceeded"}
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
// self (on primary) is unaffected by replication lag
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(replicaTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
}
}
@@ -248,17 +258,20 @@ func TestLag(t *testing.T) {
time.Sleep(time.Second)
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(replicaTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
}
@@ -276,7 +289,8 @@ func TestNoReplicas(t *testing.T) {
respStatus := warmUpHeartbeat(t)
assert.Equal(t, http.StatusOK, respStatus)
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
@@ -288,7 +302,8 @@ func TestNoReplicas(t *testing.T) {
respStatus := warmUpHeartbeat(t)
assert.NotEqual(t, http.StatusOK, respStatus)
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
}
diff --git a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go
index 39dfd93293d..851773c8655 100644
--- a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go
+++ b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go
@@ -160,7 +160,8 @@ func TestThrottlerThresholdOK(t *testing.T) {
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
}
@@ -173,12 +174,14 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) {
// {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""}
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
}
@@ -197,12 +200,14 @@ func TestThreadsRunning(t *testing.T) {
// {"StatusCode":429,"Value":2,"Threshold":2,"Message":"Threshold exceeded"}
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode)
}
})
@@ -211,12 +216,14 @@ func TestThreadsRunning(t *testing.T) {
// Restore
{
resp, err := throttleCheck(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
{
resp, err := throttleCheckSelf(primaryTablet)
- assert.NoError(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
}
})
@@ -227,7 +234,7 @@ func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
qr, err := conn.ExecuteFetch(query, 1000, true)
diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go
index 038204108a7..99b2e43de7e 100644
--- a/go/test/endtoend/topoconncache/main_test.go
+++ b/go/test/endtoend/topoconncache/main_test.go
@@ -240,9 +240,10 @@ func testURL(t *testing.T, url string, testCaseName string) {
// getStatusForUrl returns the status code for the URL
func getStatusForURL(url string) int {
- resp, _ := http.Get(url)
- if resp != nil {
- return resp.StatusCode
+ resp, err := http.Get(url)
+ if err != nil {
+ return 0
}
- return 0
+ defer resp.Body.Close()
+ return resp.StatusCode
}
diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go
index 89e51c1e665..a377ed777c8 100644
--- a/go/test/endtoend/utils/cmp.go
+++ b/go/test/endtoend/utils/cmp.go
@@ -85,6 +85,39 @@ func (mcmp *MySQLCompare) AssertMatchesAny(query string, expected ...string) {
mcmp.t.Errorf("Query: %s (-want +got):\n%v\nGot:%s", query, expected, got)
}
+// AssertMatchesAnyNoCompare ensures the given query produces any one of the expected results.
+// This method does not compare the mysql and vitess results together
+func (mcmp *MySQLCompare) AssertMatchesAnyNoCompare(query string, expected ...string) {
+ mcmp.t.Helper()
+
+ mQr, vQr := mcmp.execNoCompare(query)
+ got := fmt.Sprintf("%v", mQr.Rows)
+ valid := false
+ for _, e := range expected {
+ diff := cmp.Diff(e, got)
+ if diff == "" {
+ valid = true
+ break
+ }
+ }
+ if !valid {
+ mcmp.t.Errorf("MySQL Query: %s (-want +got):\n%v\nGot:%s", query, expected, got)
+ }
+ valid = false
+
+ got = fmt.Sprintf("%v", vQr.Rows)
+ for _, e := range expected {
+ diff := cmp.Diff(e, got)
+ if diff == "" {
+ valid = true
+ break
+ }
+ }
+ if !valid {
+ mcmp.t.Errorf("Vitess Query: %s (-want +got):\n%v\nGot:%s", query, expected, got)
+ }
+}
+
// AssertContainsError executes the query on both Vitess and MySQL.
// Both clients need to return an error. The error of Vitess must be matching the given expectation.
func (mcmp *MySQLCompare) AssertContainsError(query, expected string) {
diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go
index bd80385342f..ce10c723c15 100644
--- a/go/test/endtoend/utils/utils.go
+++ b/go/test/endtoend/utils/utils.go
@@ -154,6 +154,17 @@ func SkipIfBinaryIsBelowVersion(t *testing.T, majorVersion int, binary string) {
}
}
+// SkipIfBinaryIsAboveVersion skips the given test if the binary's major version is above majorVersion.
+func SkipIfBinaryIsAboveVersion(t *testing.T, majorVersion int, binary string) {
+ version, err := cluster.GetMajorVersion(binary)
+ if err != nil {
+ return
+ }
+ if version > majorVersion {
+ t.Skip("Current version of ", binary, ": v", version, ", expected version <= v", majorVersion)
+ }
+}
+
// AssertMatchesWithTimeout asserts that the given query produces the expected result.
// The query will be executed every 'r' duration until it matches the expected result.
// If after 'd' duration we still did not find the expected result, the test will be marked as failed.
@@ -211,3 +222,20 @@ func convertToMap(input interface{}) map[string]interface{} {
output := input.(map[string]interface{})
return output
}
+
+// TimeoutAction performs the action within the given timeout limit.
+// If the timeout is reached, the test is failed with errMsg.
+// If action returns false, the timeout loop continues; if it returns true, the function succeeds.
+func TimeoutAction(t *testing.T, timeout time.Duration, errMsg string, action func() bool) {
+ deadline := time.After(timeout)
+ ok := false
+ for !ok {
+ select {
+ case <-deadline:
+ t.Error(errMsg)
+ return
+ case <-time.After(1 * time.Second):
+ ok = action()
+ }
+ }
+}
diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go
index 264c748a5bb..8c0226a3e1c 100644
--- a/go/test/endtoend/vreplication/helper_test.go
+++ b/go/test/endtoend/vreplication/helper_test.go
@@ -23,6 +23,7 @@ import (
"net/http"
"os/exec"
"regexp"
+ "sort"
"strconv"
"strings"
"testing"
@@ -97,10 +98,8 @@ func execVtgateQuery(t *testing.T, conn *mysql.Conn, database string, query stri
func checkHealth(t *testing.T, url string) bool {
resp, err := http.Get(url)
require.NoError(t, err)
- if err != nil || resp.StatusCode != 200 {
- return false
- }
- return true
+ defer resp.Body.Close()
+ return resp.StatusCode == 200
}
func waitForQueryResult(t *testing.T, conn *mysql.Conn, database string, query string, want string) {
@@ -129,9 +128,9 @@ func waitForTabletThrottlingStatus(t *testing.T, tablet *cluster.VttabletProcess
timer := time.NewTimer(defaultTimeout)
defer timer.Stop()
for {
- _, output, err := throttlerCheckSelf(tablet, appName)
+ output, err := throttlerCheckSelf(tablet, appName)
require.NoError(t, err)
- require.NotNil(t, output)
+
gotCode, err = jsonparser.GetInt([]byte(output), "StatusCode")
require.NoError(t, err)
if wantCode == gotCode {
@@ -503,3 +502,40 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) {
}
}
}
+
+// getShardRoutingRules returns the shard routing rules stored in the
+// topo. It returns the rules sorted by shard,to_keyspace and with all
+// newlines and whitespace removed so that we have predictable,
+// compact, and easy to compare results for tests.
+func getShardRoutingRules(t *testing.T) string {
+ output, err := osExec(t, "vtctldclient", []string{"--server", getVtctldGRPCURL(), "GetShardRoutingRules"})
+ log.Infof("GetShardRoutingRules err: %+v, output: %+v", err, output)
+ require.Nilf(t, err, output)
+ require.NotNil(t, output)
+
+ // Sort the rules by shard,to_keyspace
+ jsonOutput := gjson.Parse(output)
+ rules := jsonOutput.Get("rules").Array()
+ sort.Slice(rules, func(i, j int) bool {
+ shardI := rules[i].Get("shard").String()
+ shardJ := rules[j].Get("shard").String()
+ if shardI == shardJ {
+ return rules[i].Get("to_keyspace").String() < rules[j].Get("to_keyspace").String()
+ }
+ return shardI < shardJ
+ })
+ sb := strings.Builder{}
+ for i := 0; i < len(rules); i++ {
+ if i > 0 {
+ sb.WriteString(",")
+ }
+ sb.WriteString(rules[i].String())
+ }
+ output = fmt.Sprintf(`{"rules":[%s]}`, sb.String())
+
+ // Remove newlines and whitespace
+ re := regexp.MustCompile(`[\n\s]+`)
+ output = re.ReplaceAllString(output, "")
+ output = strings.TrimSpace(output)
+ return output
+}
diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go
index 0016a0771dd..0645a3a4342 100644
--- a/go/test/endtoend/vreplication/materialize_test.go
+++ b/go/test/endtoend/vreplication/materialize_test.go
@@ -19,6 +19,7 @@ package vreplication
import (
"fmt"
"testing"
+ "time"
"github.com/stretchr/testify/require"
)
@@ -78,10 +79,10 @@ func testShardedMaterialize(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks1, "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks1, "0"), 1, 30*time.Second)
vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200, nil)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks2, "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks2, "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -194,10 +195,10 @@ func testMaterialize(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, defaultReplicas, defaultRdonly, 300, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, "0"), 1, 30*time.Second)
vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", smMaterializeVSchemaTarget, smMaterializeSchemaTarget, defaultReplicas, defaultRdonly, 400, nil)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go
index 18745aea4cd..b08a2a945fb 100644
--- a/go/test/endtoend/vreplication/migrate_test.go
+++ b/go/test/endtoend/vreplication/migrate_test.go
@@ -19,6 +19,7 @@ package vreplication
import (
"fmt"
"testing"
+ "time"
"github.com/stretchr/testify/require"
@@ -57,7 +58,7 @@ func TestMigrate(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -76,7 +77,7 @@ func TestMigrate(t *testing.T) {
extVtgate := extCell2.Vtgates[0]
require.NotNil(t, extVtgate)
- extVtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "rating", "0"), 1)
+ extVtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "rating", "0"), 1, 30*time.Second)
verifyClusterHealth(t, extVc)
extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort)
insertInitialDataIntoExternalCluster(t, extVtgateConn)
diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go
new file mode 100644
index 00000000000..c130000e53a
--- /dev/null
+++ b/go/test/endtoend/vreplication/partial_movetables_test.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vreplication
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
+
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/wrangler"
+)
+
+// TestPartialMoveTables tests partial move tables by moving each
+// customer shard -- -80,80- -- one at a time to customer2.
+func TestPartialMoveTables(t *testing.T) {
+ origDefaultRdonly := defaultRdonly
+ defer func() {
+ defaultRdonly = origDefaultRdonly
+ }()
+ defaultRdonly = 1
+ origExtraVTGateArgs := extraVTGateArgs
+ // We need to enable shard routing for partial movetables routing.
+ // And we need to disable schema change tracking in vtgate as we want
+ // to test query routing using a query we know will fail as it's
+ // using a column that doesn't exist in the schema -- this way we
+ // get the target shard details back in the error message. If schema
+ // tracking is enabled then vtgate will produce an error about the
+ // unknown symbol before attempting to route the query.
+ extraVTGateArgs = append(extraVTGateArgs, []string{
+ "--enable-partial-keyspace-migration",
+ "--schema_change_signal=false",
+ }...)
+ defer func() {
+ extraVTGateArgs = origExtraVTGateArgs
+ }()
+ vc = setupCluster(t)
+ defer vtgateConn.Close()
+ defer vc.TearDown(t)
+ setupCustomerKeyspace(t)
+
+ // Move customer table from unsharded product keyspace to
+ // sharded customer keyspace.
+ createMoveTablesWorkflow(t, "customer")
+ tstWorkflowSwitchReadsAndWrites(t)
+ tstWorkflowComplete(t)
+
+ emptyGlobalRoutingRules := "{}\n"
+
+ // These should be listed in shard order
+ emptyShardRoutingRules := `{"rules":[]}`
+ preCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer2","to_keyspace":"customer","shard":"80-"}]}`
+ halfCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}`
+ postCutoverShardRoutingRules := `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}`
+
+ // Remove any manually applied shard routing rules as these
+ // should be set by SwitchTraffic.
+ applyShardRoutingRules(t, emptyShardRoutingRules)
+ require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t))
+
+ // Now setup the customer2 keyspace so we can do a partial
+ // move tables for one of the two shards: 80-.
+ defaultRdonly = 0
+ setupCustomer2Keyspace(t)
+ currentWorkflowType = wrangler.MoveTablesWorkflow
+ wfName := "partial80Dash"
+ sourceKs := "customer"
+ targetKs := "customer2"
+ shard := "80-"
+ ksWf := fmt.Sprintf("%s.%s", targetKs, wfName)
+
+ // start the partial movetables for 80-
+ err := tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs,
+ "customer", workflowActionCreate, "", shard, "")
+ require.NoError(t, err)
+ targetTab1 = vc.getPrimaryTablet(t, targetKs, shard)
+ catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2")
+ vdiff1(t, ksWf, "")
+
+ waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards
+ waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards
+ waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80-
+
+ confirmGlobalRoutingToSource := func() {
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
+ require.NoError(t, err)
+ result := gjson.Get(output, "rules")
+ result.ForEach(func(attributeKey, attributeValue gjson.Result) bool {
+ // 0 is the keyspace and 1 is optional tablename[@tablettype]
+ fromKsTbl := strings.Split(attributeValue.Get("fromTable").String(), ".")
+ // 0 is the keyspace and 1 is the tablename
+ toKsTbl := strings.Split(attributeValue.Get("toTables.0").String(), ".")
+ // All tables in the customer and customer2 keyspaces should be
+ // routed to the customer keyspace.
+ if fromKsTbl[0] == "customer" || fromKsTbl[0] == "customer2" {
+ require.Equal(t, "customer", toKsTbl[0])
+ }
+ return true
+ })
+ }
+
+ // This query uses an ID that should always get routed to shard 80-
+ shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'"
+ // This query uses an ID that should always get routed to shard -80
+ shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'"
+
+ // reset any existing vtgate connection state
+ vtgateConn.Close()
+ vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+ defer vtgateConn.Close()
+
+ // Global routing rules should be in place with everything going to
+ // the source keyspace (customer).
+ confirmGlobalRoutingToSource()
+
+ // Shard routing rules should now also be in place with everything
+ // going to the source keyspace (customer).
+ require.Equal(t, preCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // Confirm shard targeting works before we switch any traffic.
+ // Everything should be routed to the source keyspace (customer).
+
+ log.Infof("Testing reverse route (target->source) for shard being switched")
+ _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic")
+
+ log.Infof("Testing reverse route (target->source) for shard NOT being switched")
+ _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic")
+
+ // Switch all traffic for the shard
+ require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", ""))
+ expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n",
+ targetKs, wfName, shard, shard)
+ require.Equal(t, expectedSwitchOutput, lastOutput)
+
+ // Confirm global routing rules -- everything should still be routed
+ // to the source side, customer, globally.
+ confirmGlobalRoutingToSource()
+
+ // Confirm shard routing rules -- all traffic for the 80- shard should be
+ // routed into the customer2 keyspace, overriding the global routing rules.
+ require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // reset any existing vtgate connection state
+ vtgateConn.Close()
+ vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+ defer vtgateConn.Close()
+
+ // No shard targeting
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic")
+
+ // Shard targeting
+ _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic")
+
+ // Tablet type targeting
+ _, err = vtgateConn.ExecuteFetch("use `customer2@replica`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false)
+ require.NoError(t, err)
+ _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic")
+ _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic")
+
+ // We cannot Complete a partial move tables at the moment because
+ // it will find that all traffic has (obviously) not been switched.
+ err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "")
+ require.Error(t, err)
+
+ // Confirm shard routing rules: -80 should still be routed to customer
+ // while 80- should be routed to customer2.
+ require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // Now move the other shard: -80
+ wfName = "partialDash80"
+ shard = "-80"
+ ksWf = fmt.Sprintf("%s.%s", targetKs, wfName)
+
+ // Start the partial movetables for -80, 80- has already been switched
+ err = tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs,
+ "customer", workflowActionCreate, "", shard, "")
+ require.NoError(t, err)
+ targetTab2 := vc.getPrimaryTablet(t, targetKs, shard)
+ catchup(t, targetTab2, wfName, "Partial MoveTables Customer to Customer2: -80")
+ vdiff1(t, ksWf, "")
+ // Switch all traffic for the shard
+ require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", ""))
+ expectedSwitchOutput = fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n",
+ targetKs, wfName)
+ require.Equal(t, expectedSwitchOutput, lastOutput)
+
+ // Confirm global routing rules: everything should still be routed
+ // to the source side, customer, globally.
+ confirmGlobalRoutingToSource()
+
+ // Confirm shard routing rules: all shards should be routed to the
+ // target side (customer2).
+ require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t))
+
+ // Cancel both reverse workflows (as we've done the cutover), which should
+ // clean up both the global routing rules and the shard routing rules.
+ for _, wf := range []string{"partialDash80", "partial80Dash"} {
+ // We switched traffic, so it's the reverse workflow we want to cancel.
+ reverseWf := wf + "_reverse"
+ reverseKs := sourceKs // customer
+ err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "")
+ require.NoError(t, err)
+
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show")
+ require.Error(t, err)
+ require.Contains(t, output, "no streams found")
+
+ // Delete the original workflow
+ originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf)
+ _, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "delete")
+ require.NoError(t, err)
+ output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "show")
+ require.Error(t, err)
+ require.Contains(t, output, "no streams found")
+ }
+
+ // Confirm that the global routing rules are now gone.
+ output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
+ require.NoError(t, err)
+ require.Equal(t, emptyGlobalRoutingRules, output)
+
+ // Confirm that the shard routing rules are now gone.
+ require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t))
+}
diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go
index 14d5d6c867c..3bc67d9a9db 100644
--- a/go/test/endtoend/vreplication/performance_test.go
+++ b/go/test/endtoend/vreplication/performance_test.go
@@ -63,7 +63,7 @@ create table customer(cid int, name varbinary(128), meta json default null, typ
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
index 7b085a9321b..0e9a30a470c 100644
--- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
+++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
@@ -19,14 +19,13 @@ package vreplication
import (
"fmt"
"net"
- "regexp"
"strconv"
"strings"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/tidwall/gjson"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
@@ -259,152 +258,6 @@ func TestBasicV2Workflows(t *testing.T) {
log.Flush()
}
-// TestPartialMoveTables tests partial move tables by moving just one shard
-// 80- from customer to customer2.
-func TestPartialMoveTables(t *testing.T) {
- defaultRdonly = 1
- origExtraVTGateArgs := extraVTGateArgs
- // We need to enable shard routing for partial movetables routing.
- // And we need to disable schema change tracking in vtgate as we want
- // to test query routing using a query we know will fail as it's
- // using a column that doesn't exist in the schema -- this way we
- // get the target shard details back in the error message. If schema
- // tracking is enabled then vtgate will produce an error about the
- // unknown symbol before attempting to route the query.
- extraVTGateArgs = append(extraVTGateArgs, []string{
- "--enable-partial-keyspace-migration",
- "--schema_change_signal=false",
- }...)
- defer func() {
- extraVTGateArgs = origExtraVTGateArgs
- }()
- vc = setupCluster(t)
- defer vtgateConn.Close()
- defer vc.TearDown(t)
- setupCustomerKeyspace(t)
-
- // Move customer table from unsharded product keyspace to
- // sharded customer keyspace.
- createMoveTablesWorkflow(t, "customer")
- tstWorkflowSwitchReadsAndWrites(t)
- tstWorkflowComplete(t)
-
- // Now setup the customer2 keyspace so we can do a partial
- // move tables for one of the two shards: 80-.
- defaultRdonly = 0
- setupCustomer2Keyspace(t)
- currentWorkflowType = wrangler.MoveTablesWorkflow
- wfName := "partial"
- moveToKs := "customer2"
- shard := "80-"
- ksWf := fmt.Sprintf("%s.%s", moveToKs, wfName)
- err := tstWorkflowExec(t, defaultCellName, wfName, targetKs, moveToKs,
- "customer", workflowActionCreate, "", shard, "")
- require.NoError(t, err)
- targetTab1 = vc.getPrimaryTablet(t, moveToKs, shard)
- catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2")
- vdiff1(t, ksWf, "")
-
- waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards
- waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer: all shards
- waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80-
-
- // Remove any manually applied shard routing rules as these
- // should be set by SwitchTraffic.
- emptyRules := `{"rules":[]}`
- applyShardRoutingRules(t, emptyRules)
- require.Equal(t, emptyRules, getShardRoutingRules(t))
-
- // switch all traffic
- require.NoError(t, tstWorkflowExec(t, "", wfName, "", moveToKs, "", workflowActionSwitchTraffic, "", "", ""))
- expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow customer2.partial\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n",
- shard, shard)
- require.Equal(t, expectedSwitchOutput, lastOutput)
-
- // Confirm global routing rules -- everything should still be routed
- // to the source side, customer, globally.
- output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
- require.NoError(t, err)
- result := gjson.Get(output, "rules")
- result.ForEach(func(attributeKey, attributeValue gjson.Result) bool {
- // 0 is the keyspace and 1 is optional tablename[@tablettype]
- fromKsTbl := strings.Split(attributeValue.Get("fromTable").String(), ".")
- // 0 is the keyspace and 1 is the tablename
- toKsTbl := strings.Split(attributeValue.Get("toTables.0").String(), ".")
- // All tables in the customer and customer2 keyspaces should be
- // routed to the customer keyspace.
- if fromKsTbl[0] == "customer" || fromKsTbl[0] == "customer2" {
- require.Equal(t, "customer", toKsTbl[0])
- }
- return true
- })
- // Confirm shard routing rules -- all traffic for the 80- shard should be
- // routed into the customer2 keyspace, overriding the global routing rules.
- expectedShardRoutingRules := `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}`
- require.Equal(t, expectedShardRoutingRules, getShardRoutingRules(t))
-
- // This query uses an ID that should always get routed to customer2:80-
- targetRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'"
- // This query uses an ID that should always get routed to customer:-80
- sourceRoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'"
-
- // reset any existing vtgate connection state
- vtgateConn.Close()
- vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
- defer vtgateConn.Close()
-
- // No shard targeting
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.primary")
- _, err = vtgateConn.ExecuteFetch(sourceRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer.-80.primary")
-
- // Shard targeting
- _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.primary")
- _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.primary")
-
- // Tablet type targeting
- _, err = vtgateConn.ExecuteFetch("use `customer2@replica`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.replica")
- _, err = vtgateConn.ExecuteFetch(sourceRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer.-80.replica")
- _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false)
- require.NoError(t, err)
- _, err = vtgateConn.ExecuteFetch(targetRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer2.80-.replica")
- _, err = vtgateConn.ExecuteFetch(sourceRoutedQuery, 0, false)
- require.Error(t, err)
- require.Contains(t, err.Error(), "target: customer.-80.replica")
-
- // We cannot Complete a partial move tables at the moment because it will
- // find that all traffic has (obviously) not been switched we need to
- // cleanup using Workflow delete.
- err = tstWorkflowExec(t, "", wfName, "", moveToKs, "", workflowActionComplete, "", "", "")
- require.Error(t, err)
- require.Equal(t, expectedShardRoutingRules, getShardRoutingRules(t))
- _, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWf, "delete")
- require.NoError(t, err)
- output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWf, "show")
- require.Error(t, err)
- require.Contains(t, output, "no streams found")
-
-}
-
func getVtctldGRPCURL() string {
return net.JoinHostPort("localhost", strconv.Itoa(vc.Vtctld.GrpcPort))
}
@@ -416,17 +269,6 @@ func applyShardRoutingRules(t *testing.T, rules string) {
require.NotNil(t, output)
}
-func getShardRoutingRules(t *testing.T) string {
- output, err := osExec(t, "vtctldclient", []string{"--server", getVtctldGRPCURL(), "GetShardRoutingRules"})
- log.Infof("GetShardRoutingRules err: %+v, output: %+v", err, output)
- require.Nilf(t, err, output)
- require.NotNil(t, output)
- re := regexp.MustCompile(`[\n\s]+`)
- output = re.ReplaceAllString(output, "")
- output = strings.TrimSpace(output)
- return output
-}
-
/*
testVSchemaForSequenceAfterMoveTables checks that the related sequence tag is migrated correctly in the vschema
while moving a table with an auto-increment from sharded to unsharded.
@@ -589,7 +431,7 @@ func testMoveTablesV2Workflow(t *testing.T) {
}
func testPartialSwitches(t *testing.T) {
- //nothing switched
+ // nothing switched
require.Equal(t, getCurrentState(t), wrangler.WorkflowStateNotSwitched)
tstWorkflowSwitchReads(t, "replica,rdonly", "zone1")
nextState := "Reads partially switched. Replica switched in cells: zone1. Rdonly switched in cells: zone1. Writes Not Switched"
@@ -601,7 +443,7 @@ func testPartialSwitches(t *testing.T) {
checkStates(t, currentState, nextState)
tstWorkflowSwitchReads(t, "", "")
- checkStates(t, nextState, nextState) //idempotency
+ checkStates(t, nextState, nextState) // idempotency
tstWorkflowSwitchWrites(t)
currentState = nextState
@@ -609,7 +451,7 @@ func testPartialSwitches(t *testing.T) {
checkStates(t, currentState, nextState)
tstWorkflowSwitchWrites(t)
- checkStates(t, nextState, nextState) //idempotency
+ checkStates(t, nextState, nextState) // idempotency
keyspace := "product"
if currentWorkflowType == wrangler.ReshardWorkflow {
@@ -725,9 +567,9 @@ func setupCluster(t *testing.T) *VitessCluster {
vtgate = zone1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second))
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
verifyClusterHealth(t, vc)
@@ -745,24 +587,10 @@ func setupCustomerKeyspace(t *testing.T) {
customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1); err != nil {
- t.Fatal(err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second))
custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"]
targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet
targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet
@@ -778,18 +606,14 @@ func setupCustomer2Keyspace(t *testing.T) {
t.Fatal(err)
}
for _, c2shard := range c2shards {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
if defaultReplicas > 0 {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", c2keyspace, c2shard), defaultReplicas); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", c2keyspace, c2shard), defaultReplicas, 30*time.Second))
}
if defaultRdonly > 0 {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", c2keyspace, c2shard), defaultRdonly); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", c2keyspace, c2shard), defaultRdonly, 30*time.Second))
}
}
}
@@ -908,15 +732,11 @@ func createAdditionalCustomerShards(t *testing.T, shards string) {
arrTargetShardNames := strings.Split(shards, ",")
for _, shardName := range arrTargetShardNames {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ksName, shardName), 1); err != nil {
- require.NoError(t, err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2); err != nil {
- require.NoError(t, err)
- }
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ksName, shardName), 1, 2*time.Minute); err != nil {
require.NoError(t, err)
}
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second))
}
custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName]
targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet
diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go
index b10cd55e048..92acf1b237b 100644
--- a/go/test/endtoend/vreplication/time_zone_test.go
+++ b/go/test/endtoend/vreplication/time_zone_test.go
@@ -51,7 +51,7 @@ func TestMoveTablesTZ(t *testing.T) {
vtgate = cell1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -87,7 +87,7 @@ func TestMoveTablesTZ(t *testing.T) {
if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "0"), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "0"), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go
index c20fc435b84..2368bdc01bd 100644
--- a/go/test/endtoend/vreplication/vdiff2_test.go
+++ b/go/test/endtoend/vreplication/vdiff2_test.go
@@ -121,7 +121,7 @@ func TestVDiff2(t *testing.T) {
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
for _, shard := range sourceShards {
- require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1, 30*time.Second))
}
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
@@ -139,7 +139,7 @@ func TestVDiff2(t *testing.T) {
_, err := vc.AddKeyspace(t, cells, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts)
require.NoError(t, err)
for _, shard := range targetShards {
- require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1, 30*time.Second))
}
for _, tc := range testCases {
@@ -155,7 +155,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell)
tks := vc.Cells[cells[0].Name].Keyspaces[tc.targetKs]
require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts))
for _, shard := range arrTargetShards {
- require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", tc.targetKs, shard), 1))
+ require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", tc.targetKs, shard), 1, 30*time.Second))
}
}
ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow)
diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go
index 40e34bbfce4..74e724ad52e 100644
--- a/go/test/endtoend/vreplication/vreplication_test.go
+++ b/go/test/endtoend/vreplication/vreplication_test.go
@@ -82,34 +82,36 @@ func init() {
defaultReplicas = 1
}
-func throttleResponse(tablet *cluster.VttabletProcess, path string) (resp *http.Response, respBody string, err error) {
+func throttleResponse(tablet *cluster.VttabletProcess, path string) (respBody string, err error) {
apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.TabletHostname, tablet.Port, path)
- resp, err = httpClient.Get(apiURL)
+ resp, err := httpClient.Get(apiURL)
if err != nil {
- return resp, respBody, err
+ return "", err
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
respBody = string(b)
- return resp, respBody, err
+ return respBody, err
}
-func throttleApp(tablet *cluster.VttabletProcess, app string) (*http.Response, string, error) {
+func throttleApp(tablet *cluster.VttabletProcess, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app))
}
-func unthrottleApp(tablet *cluster.VttabletProcess, app string) (*http.Response, string, error) {
+func unthrottleApp(tablet *cluster.VttabletProcess, app string) (string, error) {
return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app))
}
-func throttlerCheckSelf(tablet *cluster.VttabletProcess, app string) (resp *http.Response, respBody string, err error) {
+func throttlerCheckSelf(tablet *cluster.VttabletProcess, app string) (respBody string, err error) {
apiURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", tablet.TabletHostname, tablet.Port, app)
- resp, err = httpClient.Get(apiURL)
+ resp, err := httpClient.Get(apiURL)
if err != nil {
- return resp, respBody, err
+ return "", err
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
respBody = string(b)
- return resp, respBody, err
+ return respBody, err
}
func TestVreplicationCopyThrottling(t *testing.T) {
@@ -137,8 +139,8 @@ func TestVreplicationCopyThrottling(t *testing.T) {
}
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1, 30*time.Second)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1, 30*time.Second)
// Confirm that the initial copy table phase does not proceed until the source tablet(s)
// have an InnoDB History List length that is less than specified in the tablet's config.
@@ -181,7 +183,7 @@ func testBasicVreplicationWorkflow(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -238,8 +240,8 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) {
vtgate = cell1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -274,8 +276,8 @@ func TestVStreamFlushBinlog(t *testing.T) {
}
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKs, shard), 1, 30*time.Second)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKs, shard), 1, 30*time.Second)
verifyClusterHealth(t, vc)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
@@ -365,6 +367,7 @@ func testVStreamCellFlag(t *testing.T) {
flags := &vtgatepb.VStreamFlags{}
if tc.cells != "" {
flags.Cells = tc.cells
+ flags.CellPreference = "onlyspecified"
}
ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
@@ -433,8 +436,8 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) {
vtgate = cell1.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -567,10 +570,10 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
@@ -837,7 +840,7 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou
arrTargetShardNames := strings.Split(targetShards, ",")
for _, shardName := range arrTargetShardNames {
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ksName, shardName), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ksName, shardName), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
}
@@ -923,10 +926,10 @@ func shardMerchant(t *testing.T) {
if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", merchantKeyspace, "-80"), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", merchantKeyspace, "-80"), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
- if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", merchantKeyspace, "80-"), 1); err != nil {
+ if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", merchantKeyspace, "80-"), 1, 30*time.Second); err != nil {
t.Fatal(err)
}
moveTables(t, cell, workflow, sourceKs, targetKs, tables)
@@ -979,7 +982,7 @@ func materializeProduct(t *testing.T) {
t.Run("throttle-app-product", func(t *testing.T) {
// Now, throttle the streamer on source tablets, insert some rows
for _, tab := range productTablets {
- _, body, err := throttleApp(tab, sourceThrottlerAppName)
+ body, err := throttleApp(tab, sourceThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, sourceThrottlerAppName)
@@ -997,7 +1000,7 @@ func materializeProduct(t *testing.T) {
t.Run("unthrottle-app-product", func(t *testing.T) {
// unthrottle on source tablets, and expect the rows to show up
for _, tab := range productTablets {
- _, body, err := unthrottleApp(tab, sourceThrottlerAppName)
+ body, err := unthrottleApp(tab, sourceThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, sourceThrottlerAppName)
// give time for unthrottling to take effect and for target to fetch data
@@ -1012,7 +1015,7 @@ func materializeProduct(t *testing.T) {
// Now, throttle vreplication (vcopier/vapplier) on target tablets, and
// insert some more rows.
for _, tab := range customerTablets {
- _, body, err := throttleApp(tab, targetThrottlerAppName)
+ body, err := throttleApp(tab, targetThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, targetThrottlerAppName)
// Wait for throttling to take effect (caching will expire by this time):
@@ -1030,7 +1033,7 @@ func materializeProduct(t *testing.T) {
t.Run("unthrottle-app-customer", func(t *testing.T) {
// unthrottle on target tablets, and expect the rows to show up
for _, tab := range customerTablets {
- _, body, err := unthrottleApp(tab, targetThrottlerAppName)
+ body, err := unthrottleApp(tab, targetThrottlerAppName)
assert.NoError(t, err)
assert.Contains(t, body, targetThrottlerAppName)
}
diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go
index 731679e1eba..218f56cb890 100644
--- a/go/test/endtoend/vreplication/vschema_load_test.go
+++ b/go/test/endtoend/vreplication/vschema_load_test.go
@@ -52,8 +52,8 @@ func TestVSchemaChangesUnderLoad(t *testing.T) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go
index c596ed084f7..6899e7c583b 100644
--- a/go/test/endtoend/vreplication/vstream_test.go
+++ b/go/test/endtoend/vreplication/vstream_test.go
@@ -57,7 +57,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) {
vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -177,6 +177,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) {
const schemaUnsharded = `
create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';
+insert into customer_seq(id, next_id, cache) values(0, 1, 3);
`
const vschemaUnsharded = `
{
@@ -188,7 +189,7 @@ const vschemaUnsharded = `
}
`
const schemaSharded = `
-create table customer(cid int, name varbinary(128), primary key(cid)) CHARSET=utf8mb4;
+create table customer(cid int, name varbinary(128), primary key(cid)) TABLESPACE innodb_system CHARSET=utf8mb4;
`
const vschemaSharded = `
{
@@ -218,14 +219,18 @@ const vschemaSharded = `
func insertRow(keyspace, table string, id int) {
vtgateConn.ExecuteFetch(fmt.Sprintf("use %s;", keyspace), 1000, false)
vtgateConn.ExecuteFetch("begin", 1000, false)
- vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (cid, name) values (%d, '%s%d')", table, id+100, table, id), 1000, false)
+ _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false)
+ if err != nil {
+ log.Infof("error inserting row %d: %v", id, err)
+ }
vtgateConn.ExecuteFetch("commit", 1000, false)
}
type numEvents struct {
- numRowEvents, numJournalEvents int64
- numLessThan80Events, numGreaterThan80Events int64
- numLessThan40Events, numGreaterThan40Events int64
+ numRowEvents, numJournalEvents int64
+ numLessThan80Events, numGreaterThan80Events int64
+ numLessThan40Events, numGreaterThan40Events int64
+ numShard0BeforeReshardEvents, numShard0AfterReshardEvents int64
}
// tests the StopOnReshard flag
@@ -245,7 +250,7 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID
vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil)
vtgate = defaultCell.Vtgates[0]
require.NotNil(t, vtgate)
- vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, 30*time.Second)
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
defer vtgateConn.Close()
@@ -375,6 +380,150 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID
return &ne
}
+// Validate that we can continue streaming from multiple keyspaces after first copying some tables and then resharding one of the keyspaces
+// Ensure that there are no missing row events during the resharding process.
+func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEvents {
+ defaultCellName := "zone1"
+ allCellNames = defaultCellName
+ allCells := []string{allCellNames}
+ vc = NewVitessCluster(t, "VStreamCopyMultiKeyspaceReshard", allCells, mainClusterConfig)
+
+ require.NotNil(t, vc)
+ ogdr := defaultReplicas
+ defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets
+ defer func(dr int) { defaultReplicas = dr }(ogdr)
+
+ defer vc.TearDown(t)
+
+ defaultCell = vc.Cells[defaultCellName]
+ vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil)
+ vtgate = defaultCell.Vtgates[0]
+ require.NotNil(t, vtgate)
+ vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, time.Second*30)
+
+ vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort)
+ defer vtgateConn.Close()
+ verifyClusterHealth(t, vc)
+
+ vc.AddKeyspace(t, []*Cell{defaultCell}, "sharded", "-80,80-", vschemaSharded, schemaSharded, defaultReplicas, defaultRdonly, baseTabletID+200, nil)
+
+ ctx := context.Background()
+ vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer vstreamConn.Close()
+ vgtid := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{{
+ Keyspace: "/.*",
+ }}}
+
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ // We want to confirm that the following two tables are streamed.
+ // 1. the customer_seq in the unsharded keyspace
+ // 2. the customer table in the sharded keyspace
+ Match: "/customer.*/",
+ }},
+ }
+ flags := &vtgatepb.VStreamFlags{}
+ done := false
+
+ id := 1000
+ // First goroutine that keeps inserting rows into the table being streamed until a minute after reshard
+ // We should keep getting events on the new shards
+ go func() {
+ for {
+ if done {
+ return
+ }
+ id++
+ time.Sleep(1 * time.Second)
+ insertRow("sharded", "customer", id)
+ }
+ }()
+ // stream events from the VStream API
+ var ne numEvents
+ reshardDone := false
+ go func() {
+ var reader vtgateconn.VStreamReader
+ reader, err = vstreamConn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags)
+ require.NoError(t, err)
+ for {
+ evs, err := reader.Recv()
+
+ switch err {
+ case nil:
+ for _, ev := range evs {
+ switch ev.Type {
+ case binlogdatapb.VEventType_ROW:
+ shard := ev.RowEvent.Shard
+ switch shard {
+ case "0":
+ if reshardDone {
+ ne.numShard0AfterReshardEvents++
+ } else {
+ ne.numShard0BeforeReshardEvents++
+ }
+ case "-80":
+ ne.numLessThan80Events++
+ case "80-":
+ ne.numGreaterThan80Events++
+ case "-40":
+ ne.numLessThan40Events++
+ case "40-":
+ ne.numGreaterThan40Events++
+ }
+ ne.numRowEvents++
+ case binlogdatapb.VEventType_JOURNAL:
+ ne.numJournalEvents++
+ }
+ }
+ case io.EOF:
+ log.Infof("Stream Ended")
+ done = true
+ default:
+ log.Errorf("Returned err %v", err)
+ done = true
+ }
+ if done {
+ return
+ }
+ }
+ }()
+
+ ticker := time.NewTicker(1 * time.Second)
+ tickCount := 0
+ for {
+ <-ticker.C
+ tickCount++
+ switch tickCount {
+ case 1:
+ reshard(t, "sharded", "customer", "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, defaultCellName)
+ reshardDone = true
+ case 60:
+ done = true
+ }
+ if done {
+ break
+ }
+ }
+ log.Infof("ne=%v", ne)
+
+ // The number of row events streamed by the VStream API should match the number of rows inserted.
+ // This is important for sharded tables, where we need to ensure that no row events are missed during the resharding process.
+ //
+ // On the other hand, we don't verify the exact number of row events for the unsharded keyspace
+ // because the keyspace remains unsharded and the number of rows in the customer_seq table is always 1.
+ // We believe that checking the number of row events for the unsharded keyspace, which should always be greater than 0 before and after resharding,
+ // is sufficient to confirm that the resharding of one keyspace does not affect another keyspace, while keeping the test straightforward.
+ customerResult := execVtgateQuery(t, vtgateConn, "sharded", "select count(*) from customer")
+ insertedCustomerRows, err := evalengine.ToInt64(customerResult.Rows[0][0])
+ require.NoError(t, err)
+ require.Equal(t, insertedCustomerRows, ne.numLessThan80Events+ne.numGreaterThan80Events+ne.numLessThan40Events+ne.numGreaterThan40Events)
+ return ne
+}
+
func TestVStreamFailover(t *testing.T) {
testVStreamWithFailover(t, true)
}
@@ -406,3 +555,15 @@ func TestVStreamWithKeyspacesToWatch(t *testing.T) {
testVStreamWithFailover(t, false)
}
+
+func TestVStreamCopyMultiKeyspaceReshard(t *testing.T) {
+ ne := testVStreamCopyMultiKeyspaceReshard(t, 3000)
+ require.Equal(t, int64(0), ne.numJournalEvents)
+ require.NotZero(t, ne.numRowEvents)
+ require.NotZero(t, ne.numShard0BeforeReshardEvents)
+ require.NotZero(t, ne.numShard0AfterReshardEvents)
+ require.NotZero(t, ne.numLessThan80Events)
+ require.NotZero(t, ne.numGreaterThan80Events)
+ require.NotZero(t, ne.numLessThan40Events)
+ require.NotZero(t, ne.numGreaterThan40Events)
+}
diff --git a/go/test/endtoend/vtcombo/vttest_sample_test.go b/go/test/endtoend/vtcombo/vttest_sample_test.go
index df9e11a98f2..91db0f8a2c0 100644
--- a/go/test/endtoend/vtcombo/vttest_sample_test.go
+++ b/go/test/endtoend/vtcombo/vttest_sample_test.go
@@ -30,7 +30,7 @@ import (
"strings"
"testing"
- mysql "github.com/go-sql-driver/mysql"
+ "github.com/go-sql-driver/mysql"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -123,12 +123,13 @@ func TestMain(m *testing.M) {
func TestStandalone(t *testing.T) {
// validate debug vars
resp, err := http.Get(fmt.Sprintf("http://%s/debug/vars", vtctldAddr))
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
require.Equal(t, 200, resp.StatusCode)
resultMap := make(map[string]any)
respByte, _ := io.ReadAll(resp.Body)
err = json.Unmarshal(respByte, &resultMap)
- require.Nil(t, err)
+ require.NoError(t, err)
cmd := resultMap["cmdline"]
require.NotNil(t, cmd, "cmdline is not available in debug vars")
tmp, _ := cmd.([]any)
@@ -136,7 +137,7 @@ func TestStandalone(t *testing.T) {
ctx := context.Background()
conn, err := vtgateconn.Dial(ctx, grpcAddress)
- require.Nil(t, err)
+ require.NoError(t, err)
defer conn.Close()
cfg := mysql.NewConfig()
@@ -155,9 +156,9 @@ func TestStandalone(t *testing.T) {
assertTabletsPresent(t)
err = localCluster.TearDown()
- require.Nil(t, err)
+ require.NoError(t, err)
err = localCluster.Setup()
- require.Nil(t, err)
+ require.NoError(t, err)
assertInsertedRowsExist(ctx, t, conn, idStart, rowCount)
assertTabletsPresent(t)
@@ -170,7 +171,7 @@ func assertInsertedRowsExist(ctx context.Context, t *testing.T, conn *vtgateconn
"id_start": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(idStart), 10))},
}
res, err := cur.Execute(ctx, "select * from test_table where id >= :id_start", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
assert.Equal(t, rowCount, len(res.Rows))
@@ -179,7 +180,7 @@ func assertInsertedRowsExist(ctx context.Context, t *testing.T, conn *vtgateconn
"id_start": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(idStart), 10))},
}
res, err = cur.Execute(ctx, "select * from test_table where id = :id_start", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, 1, len(res.Rows))
assert.Equal(t, "VARCHAR(\"test1000\")", res.Rows[0][1].String())
}
@@ -200,7 +201,7 @@ func assertRouting(ctx context.Context, t *testing.T, db *sql.DB) {
func assertCanInsertRow(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn) {
cur := conn.Session(ks1+":80-@primary", nil)
_, err := cur.Execute(ctx, "begin", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
i := 0x810000000000000
bindVariables := map[string]*querypb.BindVariable{
@@ -210,10 +211,10 @@ func assertCanInsertRow(ctx context.Context, t *testing.T, conn *vtgateconn.VTGa
}
query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)"
_, err = cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
_, err = cur.Execute(ctx, "commit", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
}
func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateConn, idStart, rowCount int) {
@@ -221,7 +222,7 @@ func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateCo
query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)"
_, err := cur.Execute(ctx, "begin", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
for i := idStart; i < idStart+rowCount; i++ {
bindVariables := map[string]*querypb.BindVariable{
@@ -230,11 +231,11 @@ func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateCo
"keyspace_id": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(i), 10))},
}
_, err = cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
}
_, err = cur.Execute(ctx, "commit", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
}
func assertTabletsPresent(t *testing.T) {
@@ -243,7 +244,7 @@ func assertTabletsPresent(t *testing.T) {
log.Infof("Running vtctlclient with command: %v", tmpCmd.Args)
output, err := tmpCmd.CombinedOutput()
- require.Nil(t, err)
+ require.NoError(t, err)
numPrimary, numReplica, numRdonly, numDash80, num80Dash, numRouted := 0, 0, 0, 0, 0, 0
lines := strings.Split(string(output), "\n")
@@ -302,17 +303,17 @@ func assertTransactionalityAndRollbackObeyed(ctx context.Context, t *testing.T,
}
query := "insert into test_table (id, msg, keyspace_id) values (:id, :msg, :keyspace_id)"
_, err := cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
bindVariables = map[string]*querypb.BindVariable{
"msg": {Type: querypb.Type_VARCHAR, Value: []byte(msg)},
}
res, err := cur.Execute(ctx, "select * from test_table where msg = :msg", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, 1, len(res.Rows))
_, err = cur.Execute(ctx, "begin", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
msg2 := msg + "2"
bindVariables = map[string]*querypb.BindVariable{
@@ -321,15 +322,15 @@ func assertTransactionalityAndRollbackObeyed(ctx context.Context, t *testing.T,
}
query = "update test_table set msg = :msg where id = :id"
_, err = cur.Execute(ctx, query, bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
_, err = cur.Execute(ctx, "rollback", nil)
- require.Nil(t, err)
+ require.NoError(t, err)
bindVariables = map[string]*querypb.BindVariable{
"msg": {Type: querypb.Type_VARCHAR, Value: []byte(msg2)},
}
res, err = cur.Execute(ctx, "select * from test_table where msg = :msg", bindVariables)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, 0, len(res.Rows))
}
diff --git a/go/test/endtoend/vtctldclient/cli_test.go b/go/test/endtoend/vtctldclient/cli_test.go
new file mode 100644
index 00000000000..82dbc6658a2
--- /dev/null
+++ b/go/test/endtoend/vtctldclient/cli_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtctldclient
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/protobuf/proto"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command"
+ "vitess.io/vitess/go/protoutil"
+ "vitess.io/vitess/go/vt/vtctl/localvtctldclient"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice"
+)
+
+type fakeServer struct {
+ vtctlservicepb.UnimplementedVtctldServer
+ t testing.TB
+
+ applySchemaRequests []*vtctldatapb.ApplySchemaRequest
+}
+
+func (s *fakeServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySchemaRequest) (*vtctldatapb.ApplySchemaResponse, error) {
+ s.applySchemaRequests = append(s.applySchemaRequests, req)
+ return &vtctldatapb.ApplySchemaResponse{}, nil
+}
+
+func TestApplySchema(t *testing.T) {
+ server := &fakeServer{t: t}
+
+ command.VtctldClientProtocol = "local"
+ localvtctldclient.SetServer(server)
+
+ defer func(argv []string) {
+ os.Args = argv
+ }(append([]string{}, os.Args...))
+
+ os.Args = []string{
+ "vtctldclient",
+ "--server='doesnotmatter'",
+ "ApplySchema",
+ "--sql",
+ `"CREATE TABLE foo(id int not null primary key, name varchar(255)); CREATE TABLE bar (id int not null primary key, foo_id int not null);`,
+ "test",
+ }
+
+ require.NoError(t, command.Root.Execute())
+ expected := &vtctldatapb.ApplySchemaRequest{
+ Keyspace: "test",
+ Sql: []string{
+ `"CREATE TABLE foo(id int not null primary key, name varchar(255)); CREATE TABLE bar (id int not null primary key, foo_id int not null);`,
+ },
+ DdlStrategy: "direct",
+ WaitReplicasTimeout: protoutil.DurationToProto(10 * time.Second),
+ }
+ actual := server.applySchemaRequests[0]
+ assert.True(t, proto.Equal(actual, expected), "ApplySchema received unexpected request (got %v want %v)", actual, expected)
+}
diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
index e941b78c2cd..ab844a8ffd1 100644
--- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
+++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go
@@ -48,7 +48,7 @@ var (
PRIMARY KEY (id)
) Engine=InnoDB;`
vschemaDDL = "alter vschema create vindex test_vdx using hash"
- vschemaDDLError = fmt.Sprintf("Error 1105: cannot perform Update on keyspaces/%s/VSchema as the topology server connection is read-only",
+ vschemaDDLError = fmt.Sprintf("Error 1105 (HY000): cannot perform Update on keyspaces/%s/VSchema as the topology server connection is read-only",
keyspaceUnshardedName)
)
diff --git a/go/test/endtoend/vtgate/lookup_test.go b/go/test/endtoend/vtgate/lookup_test.go
index c984459ab97..3294c1898d6 100644
--- a/go/test/endtoend/vtgate/lookup_test.go
+++ b/go/test/endtoend/vtgate/lookup_test.go
@@ -42,6 +42,45 @@ func TestUnownedLookupInsertNull(t *testing.T) {
utils.Exec(t, conn, "insert into t8(id, parent_id, t9_id) VALUES (3, 2, 2)")
}
+func TestLookupUniqueWithAutocommit(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+
+ // conn2 is to check entries in the lookup table
+ conn2, err := mysql.Connect(context.Background(), &vtParams)
+ require.Nil(t, err)
+ defer conn2.Close()
+
+ // Test that all vindex writes are autocommitted outside of any ongoing transactions.
+ //
+	// Also test that autocommitted vindex entries are visible inside transactions, as lookups
+ // should also use the autocommit connection.
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (1, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 1", "[[INT64(1)]]")
+
+ utils.Exec(t, conn, "begin")
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (2, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)] [INT64(2)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 2", "[[INT64(2)]]")
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (3, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)] [INT64(2)] [INT64(3)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 3", "[[INT64(3)]]")
+
+ utils.Exec(t, conn, "savepoint sp_foobar")
+
+ utils.Exec(t, conn, "insert into t10(id, sharding_key) VALUES (4, 1)")
+
+ utils.AssertMatches(t, conn2, "select id from t10_id_to_keyspace_id_idx order by id asc", "[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]")
+ utils.AssertMatches(t, conn, "select id from t10 where id = 4", "[[INT64(4)]]")
+}
+
func TestUnownedLookupInsertChecksKeyspaceIdsAreMatching(t *testing.T) {
conn, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go
index f683be5cef1..1d2bc59b50a 100644
--- a/go/test/endtoend/vtgate/main_test.go
+++ b/go/test/endtoend/vtgate/main_test.go
@@ -110,7 +110,7 @@ func start(t *testing.T) (*mysql.Conn, func()) {
deleteAll := func() {
utils.Exec(t, conn, "use ks")
- tables := []string{"t1", "t2", "vstream_test", "t3", "t4", "t6", "t7_xxhash", "t7_xxhash_idx", "t7_fk", "t8", "t9", "t9_id_to_keyspace_id_idx", "t1_id2_idx", "t2_id4_idx", "t3_id7_idx", "t4_id2_idx", "t5_null_vindex", "t6_id2_idx"}
+ tables := []string{"t1", "t2", "vstream_test", "t3", "t4", "t6", "t7_xxhash", "t7_xxhash_idx", "t7_fk", "t8", "t9", "t9_id_to_keyspace_id_idx", "t10", "t10_id_to_keyspace_id_idx", "t1_id2_idx", "t2_id4_idx", "t3_id7_idx", "t4_id2_idx", "t5_null_vindex", "t6_id2_idx"}
for _, table := range tables {
_, _ = utils.ExecAllowError(t, conn, "delete from "+table)
}
diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go
index cf47ad6a70f..b74947d63e9 100644
--- a/go/test/endtoend/vtgate/partialfailure/main_test.go
+++ b/go/test/endtoend/vtgate/partialfailure/main_test.go
@@ -22,12 +22,11 @@ import (
"os"
"testing"
- "vitess.io/vitess/go/test/endtoend/utils"
-
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/utils"
)
var (
@@ -99,39 +98,54 @@ CREATE TABLE test_vdx (
`
)
+var enableSettingsPool bool
+
func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
- exitCode := func() int {
- clusterInstance = cluster.NewCluster(cell, hostname)
- defer clusterInstance.Teardown()
+ code := runAllTests(m)
+ if code != 0 {
+ os.Exit(code)
+ }
- // Start topo server
- if err := clusterInstance.StartTopo(); err != nil {
- return 1
- }
+ println("running with settings pool enabled")
+ // run again with settings pool enabled.
+ enableSettingsPool = true
+ code = runAllTests(m)
+ os.Exit(code)
+}
- // Start keyspace
- keyspace := &cluster.Keyspace{
- Name: keyspaceName,
- SchemaSQL: sqlSchema,
- VSchema: vSchema,
- }
- if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 0, false); err != nil {
+func runAllTests(m *testing.M) int {
+ clusterInstance = cluster.NewCluster(cell, hostname)
+ defer clusterInstance.Teardown()
- return 1
- }
+ // Start topo server
+ if err := clusterInstance.StartTopo(); err != nil {
+ return 1
+ }
- // Start vtgate
- clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--planner-version", "Gen4Fallback")
- if err := clusterInstance.StartVtgate(); err != nil {
- return 1
- }
- vtParams = clusterInstance.GetVTParams(keyspaceName)
- return m.Run()
- }()
- os.Exit(exitCode)
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: sqlSchema,
+ VSchema: vSchema,
+ }
+ if enableSettingsPool {
+ clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool")
+ }
+ if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 0, false); err != nil {
+
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--planner-version", "Gen4Fallback")
+ if err := clusterInstance.StartVtgate(); err != nil {
+ return 1
+ }
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+ return m.Run()
}
func testAllModes(t *testing.T, stmts func(conn *mysql.Conn)) {
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index dfcfcc0c426..585ea706e29 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -33,7 +33,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
deleteAll := func() {
_, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp")
- tables := []string{"aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2"}
+ tables := []string{"t9", "aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2", "t11"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -365,3 +365,87 @@ func TestEmptyTableAggr(t *testing.T) {
}
}
+
+func TestOrderByCount(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t9(id1, id2, id3) values(1, '1', '1'), (2, '2', '2'), (3, '2', '2'), (4, '3', '3'), (5, '3', '3'), (6, '3', '3')")
+
+ mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`)
+}
+
+func TestAggregateRandom(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (1, 'name 1', 'value 1', 1), (2, 'name 2', 'value 2', 2)")
+ mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)")
+
+ utils.Exec(t, mcmp.VtConn, "set sql_mode=''")
+ utils.Exec(t, mcmp.MySQLConn, "set sql_mode=''")
+ mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t1.shardKey, t1.name, count(t2.id) FROM t1 JOIN t2 ON t1.value != t2.shardKey GROUP BY t1.t1_id", `[[INT64(1) VARCHAR("name 1") INT64(2)] [INT64(2) VARCHAR("name 2") INT64(2)]]`)
+}
+
+// TestAggregateLeftJoin tests that aggregates work with left joins and that the count is not ignored when the column value does not match the right side table.
+func TestAggregateLeftJoin(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (11, 'r', 'r', 1), (3, 'r', 'r', 0)")
+ mcmp.Exec("insert into t2(id, shardKey) values (11, 1)")
+
+ mcmp.AssertMatchesNoOrder("SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(1)] [INT64(0)]]`)
+ mcmp.AssertMatches("SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`)
+ mcmp.AssertMatches("SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`)
+ mcmp.AssertMatches("SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`)
+}
+
+// TestScalarAggregate validates that only the count is returned and that no additional field is returned.
+func TestScalarAggregate(t *testing.T) {
+ // disable schema tracking to have the weight_string column added to the queries sent down to mysql.
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal=false")
+ require.NoError(t,
+ clusterInstance.RestartVtgate())
+
+ // update vtgate params
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+
+ defer func() {
+ // roll it back
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal")
+ require.NoError(t,
+ clusterInstance.RestartVtgate())
+ // update vtgate params
+ vtParams = clusterInstance.GetVTParams(keyspaceName)
+
+ }()
+
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)")
+ mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(distinct val1) from aggr_test", `[[INT64(3)]]`)
+}
+
+func TestAggregationRandomOnAnAggregatedValue(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t11(k, a, b) values (0, 100, 10), (10, 200, 20);")
+
+ mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from t11 where a = 100) A;",
+ `[[DECIMAL(100) DECIMAL(10) DECIMAL(10.0000)]]`)
+}
+
+func TestBuggyQueries(t *testing.T) {
+ // These queries have been found by the query fuzzer to produce wrong results.
+ // Adding them as end2end tests to make sure we never get them wrong again.
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t11(k, a, b) values (0, 100, 10), (10, 200, 20), (20, null, null)")
+
+ mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ sum(t1.a) from t11 as t1, t11 as t2",
+ `[[DECIMAL(900)]]`)
+}
diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go
index 65cf3a0343b..a859002f44a 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go
@@ -33,8 +33,8 @@ var (
clusterInstance *cluster.LocalProcessCluster
vtParams mysql.ConnParams
mysqlParams mysql.ConnParams
- keyspaceName = "ks_union"
- cell = "test_union"
+ keyspaceName = "ks_aggr"
+ cell = "test_aggr"
//go:embed schema.sql
schemaSQL string
diff --git a/go/test/endtoend/vtgate/queries/aggregation/schema.sql b/go/test/endtoend/vtgate/queries/aggregation/schema.sql
index 944c3783048..0b59819dda5 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/schema.sql
+++ b/go/test/endtoend/vtgate/queries/aggregation/schema.sql
@@ -69,3 +69,9 @@ CREATE TABLE t2 (
shardKey bigint,
PRIMARY KEY (id)
) ENGINE InnoDB;
+
+CREATE TABLE t11 (
+ k BIGINT PRIMARY KEY,
+ a INT,
+ b INT
+);
diff --git a/go/test/endtoend/vtgate/queries/aggregation/vschema.json b/go/test/endtoend/vtgate/queries/aggregation/vschema.json
index c2d3f133a35..727d6adc1d0 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/vschema.json
+++ b/go/test/endtoend/vtgate/queries/aggregation/vschema.json
@@ -123,6 +123,14 @@
"name": "hash"
}
]
+ },
+ "t11": {
+ "column_vindexes": [
+ {
+ "column": "k",
+ "name": "hash"
+ }
+ ]
}
}
}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go
index 5da8d8bac9b..62601ed528d 100644
--- a/go/test/endtoend/vtgate/queries/derived/derived_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go
@@ -30,7 +30,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
require.NoError(t, err)
deleteAll := func() {
- tables := []string{"music"}
+ tables := []string{"music", "user"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -56,6 +56,7 @@ func TestDerivedTableWithOrderByLimit(t *testing.T) {
}
func TestDerivedAggregationOnRHS(t *testing.T) {
+ t.Skip("skipped for now, issue: https://github.com/vitessio/vitess/issues/11703")
mcmp, closer := start(t)
defer closer()
@@ -84,10 +85,7 @@ func TestDerivedTableWithHaving(t *testing.T) {
mcmp.Exec("insert into user(id, name) values(1,'toto'), (2,'tata'), (3,'titi'), (4,'tete'), (5,'foo')")
mcmp.Exec("set sql_mode = ''")
-
- // this is probably flaky? the id returned from the derived table could be any of the ids from user.
- // works on my machine (TM)
- mcmp.Exec("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s")
+ mcmp.AssertMatchesAnyNoCompare("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s", "[[INT64(1)]]", "[[INT64(4)]]")
}
func TestDerivedTableColumns(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go
index 22ca01d32b0..d7c904dee3d 100644
--- a/go/test/endtoend/vtgate/queries/dml/insert_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go
@@ -384,3 +384,13 @@ func TestInsertSelectUnshardedUsingSharded(t *testing.T) {
})
}
}
+
+func TestRedactDupError(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,100),(1,2,200),(1,3,300)")
+
+ // inserting same rows, throws error.
+ mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `BindVars: {REDACTED}`)
+}
diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go
index de3c7897ae0..7fb361837f8 100644
--- a/go/test/endtoend/vtgate/queries/dml/main_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/main_test.go
@@ -98,6 +98,8 @@ func TestMain(m *testing.M) {
return 1
}
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--vtgate-config-terse-errors")
+
// Start vtgate
clusterInstance.VtGatePlannerVersion = planbuilder.Gen4
err = clusterInstance.StartVtgate()
diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
index be902e5bf0e..0b7b72a4f25 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
+++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
@@ -38,10 +38,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
deleteAll := func() {
_, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp")
- tables := []string{
- "t1", "t1_id2_idx", "vstream_test", "t2", "t2_id4_idx", "t3", "t3_id7_idx", "t4",
- "t4_id2_idx", "t5_null_vindex", "t6", "t6_id2_idx", "t7_xxhash", "t7_xxhash_idx", "t7_fk", "t8",
- }
+ tables := []string{"t1", "t1_id2_idx", "t7_xxhash", "t7_xxhash_idx", "t7_fk"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -206,3 +203,24 @@ func TestMultipleSchemaPredicates(t *testing.T) {
require.Error(t, err)
require.Contains(t, err.Error(), "specifying two different database in the query is not supported")
}
+
+func TestInfrSchemaAndUnionAll(t *testing.T) {
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--planner-version=gen4")
+ require.NoError(t,
+ clusterInstance.RestartVtgate())
+
+ vtConnParams := clusterInstance.GetVTParams(keyspaceName)
+ vtConnParams.DbName = keyspaceName
+ conn, err := mysql.Connect(context.Background(), &vtConnParams)
+ require.NoError(t, err)
+
+ for _, workload := range []string{"oltp", "olap"} {
+ t.Run(workload, func(t *testing.T) {
+ utils.Exec(t, conn, fmt.Sprintf("set workload = %s", workload))
+ utils.Exec(t, conn, "start transaction")
+ utils.Exec(t, conn, `select connection_id()`)
+ utils.Exec(t, conn, `(select 'corder' from t1 limit 1) union all (select 'customer' from t7_xxhash limit 1)`)
+ utils.Exec(t, conn, "rollback")
+ })
+ }
+}
diff --git a/go/test/endtoend/vtgate/queries/informationschema/schema.sql b/go/test/endtoend/vtgate/queries/informationschema/schema.sql
index f34e4e8c5bd..1fc9949406b 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/schema.sql
+++ b/go/test/endtoend/vtgate/queries/informationschema/schema.sql
@@ -12,87 +12,6 @@ create table t1_id2_idx
primary key (id2)
) Engine = InnoDB;
-create table vstream_test
-(
- id bigint,
- val bigint,
- primary key (id)
-) Engine = InnoDB;
-
-create table t2
-(
- id3 bigint,
- id4 bigint,
- primary key (id3)
-) Engine = InnoDB;
-
-create table t2_id4_idx
-(
- id bigint not null auto_increment,
- id4 bigint,
- id3 bigint,
- primary key (id),
- key idx_id4 (id4)
-) Engine = InnoDB;
-
-create table t3
-(
- id5 bigint,
- id6 bigint,
- id7 bigint,
- primary key (id5)
-) Engine = InnoDB;
-
-create table t3_id7_idx
-(
- id bigint not null auto_increment,
- id7 bigint,
- id6 bigint,
- primary key (id)
-) Engine = InnoDB;
-
-create table t4
-(
- id1 bigint,
- id2 varchar(10),
- primary key (id1)
-) ENGINE = InnoDB
- DEFAULT charset = utf8mb4
- COLLATE = utf8mb4_general_ci;
-
-create table t4_id2_idx
-(
- id2 varchar(10),
- id1 bigint,
- keyspace_id varbinary(50),
- primary key (id2, id1)
-) Engine = InnoDB
- DEFAULT charset = utf8mb4
- COLLATE = utf8mb4_general_ci;
-
-create table t5_null_vindex
-(
- id bigint not null,
- idx varchar(50),
- primary key (id)
-) Engine = InnoDB;
-
-create table t6
-(
- id1 bigint,
- id2 varchar(10),
- primary key (id1)
-) Engine = InnoDB;
-
-create table t6_id2_idx
-(
- id2 varchar(10),
- id1 bigint,
- keyspace_id varbinary(50),
- primary key (id1),
- key (id2)
-) Engine = InnoDB;
-
create table t7_xxhash
(
uid varchar(50),
@@ -116,10 +35,3 @@ create table t7_fk
CONSTRAINT t7_fk_ibfk_1 foreign key (t7_uid) references t7_xxhash (uid)
on delete set null on update cascade
) Engine = InnoDB;
-
-create table t8
-(
- id8 bigint,
- testId bigint,
- primary key (id8)
-) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/queries/informationschema/vschema.json b/go/test/endtoend/vtgate/queries/informationschema/vschema.json
index b440e3905dc..eec57e9970d 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/vschema.json
+++ b/go/test/endtoend/vtgate/queries/informationschema/vschema.json
@@ -7,12 +7,12 @@
"unicode_loose_xxhash" : {
"type": "unicode_loose_xxhash"
},
- "t3_id7_vdx": {
- "type": "lookup_hash",
+ "t1_id2_idx": {
+ "type": "lookup_unique",
"params": {
- "table": "t3_id7_idx",
- "from": "id7",
- "to": "id6"
+ "table": "t1_id2_idx",
+ "from": "id2",
+ "to": "keyspace_id"
},
"owner": "t3"
},
@@ -28,15 +28,15 @@
}
},
"tables": {
- "t3": {
+ "t1": {
"column_vindexes": [
{
- "column": "id6",
+ "column": "id1",
"name": "hash"
},
{
- "column": "id7",
- "name": "t3_id7_vdx"
+ "column": "id2",
+ "name": "t1_id2_idx"
}
]
},
@@ -48,46 +48,6 @@
}
]
},
- "t9": {
- "column_vindexes": [
- {
- "column": "id1",
- "name": "hash"
- }
- ]
- },
- "aggr_test": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash"
- }
- ],
- "columns": [
- {
- "name": "val1",
- "type": "VARCHAR"
- }
- ]
- },
- "aggr_test_dates": {
- "column_vindexes": [
- {
- "column": "id",
- "name": "hash"
- }
- ],
- "columns": [
- {
- "name": "val1",
- "type": "DATETIME"
- },
- {
- "name": "val2",
- "type": "DATETIME"
- }
- ]
- },
"t7_xxhash": {
"column_vindexes": [
{
diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go
index fb66f395f9e..d2e1dcd0996 100644
--- a/go/test/endtoend/vtgate/queries/misc/misc_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go
@@ -17,8 +17,14 @@ limitations under the License.
package misc
import (
+ "database/sql"
+ "fmt"
+ "strconv"
+ "strings"
"testing"
+ _ "github.com/go-sql-driver/mysql"
+
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
@@ -89,3 +95,78 @@ func TestInvalidDateTimeTimestampVals(t *testing.T) {
_, err = mcmp.ExecAllowAndCompareError(`select timestamp'2022'`)
require.Error(t, err)
}
+
+// TestIntervalWithMathFunctions tests that the Interval keyword can be used with math functions.
+func TestIntervalWithMathFunctions(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // Set the time zone explicitly to UTC, otherwise the output of FROM_UNIXTIME is going to be dependent
+ // on the time zone of the system.
+ mcmp.Exec("SET time_zone = '+00:00'")
+ mcmp.AssertMatches("select '2020-01-01' + interval month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month))-1 month", `[[CHAR("2020-12-01")]]`)
+ mcmp.AssertMatches("select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)", `[[DATETIME("2023-01-08 13:48:42")]]`)
+}
+
+// TestCast tests the queries that contain the cast function.
+func TestCast(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.AssertMatches("select cast('2023-01-07 12:34:56' as date) limit 1", `[[DATE("2023-01-07")]]`)
+ mcmp.AssertMatches("select cast('2023-01-07 12:34:56' as date)", `[[DATE("2023-01-07")]]`)
+ mcmp.AssertMatches("select cast('3.2' as float)", `[[FLOAT32(3.2)]]`)
+ mcmp.AssertMatches("select cast('3.2' as double)", `[[FLOAT64(3.2)]]`)
+ mcmp.AssertMatches("select cast('3.2' as unsigned)", `[[UINT64(3)]]`)
+}
+
+// This test ensures that we support PREPARE statements with 65530 parameters.
+// It opens a MySQL connection using the go-mysql driver and executes a select query.
+// It then checks that the result contains the proper rows and that it is not failing.
+func TestHighNumberOfParams(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(id1) values (0), (1), (2), (3), (4)")
+
+ paramCount := 65530
+
+ // create the value and argument slices used to build the prepare stmt
+ var vals []any
+ var params []string
+ for i := 0; i < paramCount; i++ {
+ vals = append(vals, strconv.Itoa(i))
+ params = append(params, "?")
+ }
+
+ // connect to the vitess cluster
+ db, err := sql.Open("mysql", fmt.Sprintf("@tcp(%s:%v)/%s", vtParams.Host, vtParams.Port, vtParams.DbName))
+ require.NoError(t, err)
+
+ // run the query
+ r, err := db.Query(fmt.Sprintf("SELECT /*vt+ QUERY_TIMEOUT_MS=10000 */ id1 FROM t1 WHERE id1 in (%s) ORDER BY id1 ASC", strings.Join(params, ", ")), vals...)
+ require.NoError(t, err)
+
+ // check the results we got, we should get 5 rows with each: 0, 1, 2, 3, 4
+ // count is the row number we are currently visiting; it also corresponds to
+ // the column value we expect.
+ count := 0
+ for r.Next() {
+ j := -1
+ err := r.Scan(&j)
+ require.NoError(t, err)
+ require.Equal(t, j, count)
+ count++
+ }
+ require.Equal(t, 5, count)
+}
+
+func TestBuggyOuterJoin(t *testing.T) {
+ // We found a couple of inconsistencies around outer joins, adding these tests to stop regressions
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec("insert into t1(id1, id2) values (1,2), (42,5), (5, 42)")
+
+ mcmp.Exec("select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2")
+}
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index d955c4b2d06..cdc00e07a21 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -58,14 +58,15 @@ func TestSubqueriesHasValues(t *testing.T) {
mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`)
}
-// Test only supported in >= v14.0.0
+// Test only supported in >= v15.0.0
func TestSubqueriesExists(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 14, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 15, "vtgate")
mcmp, closer := start(t)
defer closer()
mcmp.Exec("insert into t1(id1, id2) values (0,1),(1,2),(2,3),(3,4),(4,5),(5,6)")
mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE EXISTS (SELECT id1 FROM t1 WHERE id1 > 0) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`)
+ mcmp.AssertMatches(`select * from (select 1) as tmp where exists(select 1 from t1 where id1 = 1)`, `[[INT32(1)]]`)
}
func TestQueryAndSubQWithLimit(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/schema.sql b/go/test/endtoend/vtgate/schema.sql
index c597bd7e53e..536bec397ec 100644
--- a/go/test/endtoend/vtgate/schema.sql
+++ b/go/test/endtoend/vtgate/schema.sql
@@ -138,3 +138,17 @@ create table t9_id_to_keyspace_id_idx
keyspace_id varbinary(10),
primary key (id)
) Engine = InnoDB;
+
+create table t10
+(
+ id bigint,
+ sharding_key bigint,
+ primary key (id)
+) Engine = InnoDB;
+
+create table t10_id_to_keyspace_id_idx
+(
+ id bigint,
+ keyspace_id varbinary(10),
+ primary key (id)
+) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go
index a603bc1c89b..7b85165cd10 100644
--- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go
+++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go
@@ -19,11 +19,14 @@ package loadkeyspace
import (
"os"
"path"
+ "strings"
"testing"
"time"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/test/endtoend/utils"
+
"vitess.io/vitess/go/test/endtoend/cluster"
)
@@ -115,6 +118,9 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) {
for _, vttablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets {
err = vttablet.VttabletProcess.TearDown()
require.NoError(t, err)
+ utils.TimeoutAction(t, 1*time.Minute, "timeout - teardown of VTTablet", func() bool {
+ return vttablet.VttabletProcess.GetStatus() == ""
+ })
}
// Start vtgate with the schema_change_signal flag
@@ -122,11 +128,13 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) {
err = clusterInstance.StartVtgate()
require.NoError(t, err)
- // check warning logs
- logDir := clusterInstance.VtgateProcess.LogDir
- all, err := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt"))
- require.NoError(t, err)
- require.Contains(t, string(all), "Unable to get initial schema reload")
+ // After starting VTGate we need to leave enough time for resolveAndLoadKeyspace to reach
+ // the schema tracking timeout (5 seconds).
+ utils.TimeoutAction(t, 5*time.Minute, "timeout - could not find 'Unable to get initial schema reload' in 'vtgate-stderr.txt'", func() bool {
+ logDir := clusterInstance.VtgateProcess.LogDir
+ all, _ := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt"))
+ return strings.Contains(string(all), "Unable to get initial schema reload")
+ })
}
func TestNoInitialKeyspace(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go
index 9d3de02f846..fa0fa2e4672 100644
--- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go
+++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go
@@ -147,18 +147,20 @@ func TestVSchemaTrackerKeyspaceReInit(t *testing.T) {
require.NoError(t, err)
err = clusterInstance.WaitForTabletsToHealthyInVtgate()
require.NoError(t, err)
- time.Sleep(time.Duration(signalInterval*2) * time.Second)
- var newResults any
- readVSchema(t, &clusterInstance.VtgateProcess, &newResults)
- assert.Equal(t, originalResults, newResults)
- newResults = nil
+
+ utils.TimeoutAction(t, 1*time.Minute, "timeout - could not find the updated vschema in VTGate", func() bool {
+ var newResults any
+ readVSchema(t, &clusterInstance.VtgateProcess, &newResults)
+ return assert.ObjectsAreEqual(originalResults, newResults)
+ })
}
}
func readVSchema(t *testing.T, vtgate *cluster.VtgateProcess, results *any) {
httpClient := &http.Client{Timeout: 5 * time.Second}
resp, err := httpClient.Get(vtgate.VSchemaURL)
- require.Nil(t, err)
+ require.NoError(t, err)
+ defer resp.Body.Close()
assert.Equal(t, 200, resp.StatusCode)
json.NewDecoder(resp.Body).Decode(results)
}
diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go b/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go
index 276664c74fd..d2fcf07810c 100644
--- a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go
+++ b/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go
@@ -102,9 +102,11 @@ func TestSchemaTrackingError(t *testing.T) {
case <-timeout:
t.Error("timeout waiting for schema tracking error")
case <-time.After(1 * time.Second):
- // check info logs
+ // check info logs, continue if the file could not be read correctly.
all, err := os.ReadFile(path.Join(logDir, "vtgate.WARNING"))
- require.NoError(t, err)
+ if err != nil {
+ continue
+ }
if strings.Contains(string(all), "Table ACL might be enabled, --schema_change_signal_user needs to be passed to VTGate for schema tracking to work. Check 'schema tracking' docs on vitess.io") {
present = true
}
diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go
index 0d72b870955..e655b7d7034 100644
--- a/go/test/endtoend/vtgate/sequence/seq_test.go
+++ b/go/test/endtoend/vtgate/sequence/seq_test.go
@@ -47,9 +47,9 @@ var (
)Engine=InnoDB;
create table sequence_test_seq (
- id int default 0,
- next_id bigint default null,
- cache bigint default null,
+ id int default 0,
+ next_id bigint default null,
+ cache bigint default null,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB;
@@ -60,13 +60,13 @@ INSERT INTO id_seq (id, next_id, cache) values (0, 1, 1000);
`
unshardedVSchema = `
- {
+ {
"sharded":false,
"vindexes": {
"hash_index": {
"type": "hash"
}
- },
+ },
"tables": {
"sequence_test":{
"auto_increment":{
@@ -147,7 +147,7 @@ CREATE TABLE allDefaults (
"column": "id",
"sequence": "id_seq"
}
- },
+ },
"allDefaults": {
"columnVindexes": [
{
@@ -264,6 +264,12 @@ func TestSeq(t *testing.T) {
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("wrong insert: %v, must contain %s", err, want)
}
+
+ utils.Exec(t, conn, "DELETE FROM sequence_test_seq")
+ qr = utils.Exec(t, conn, "select * from sequence_test_seq")
+ if got, want := fmt.Sprintf("%v", qr.Rows), `[]`; got != want {
+ t.Errorf("select:\n%v want\n%v", got, want)
+ }
}
func TestDotTableSeq(t *testing.T) {
diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go
index bcfdbd51a8f..4a0160e0fa5 100644
--- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go
+++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go
@@ -215,7 +215,7 @@ func addTablet(t *testing.T, tabletUID int, tabletType string) *cluster.Vttablet
serving := tablet.VttabletProcess.WaitForStatus("SERVING", time.Duration(60*time.Second))
assert.Equal(t, serving, true, "Tablet did not become ready within a reasonable time")
err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.%s",
- tablet.VttabletProcess.Keyspace, tablet.VttabletProcess.Shard, tablet.Type), 1)
+ tablet.VttabletProcess.Keyspace, tablet.VttabletProcess.Shard, tablet.Type), 1, 30*time.Second)
require.Nil(t, err)
t.Logf("Added tablet: %s", tablet.Alias)
diff --git a/go/test/endtoend/vtgate/transaction/restart/main_test.go b/go/test/endtoend/vtgate/transaction/restart/main_test.go
new file mode 100644
index 00000000000..3c7ac710e9d
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/restart/main_test.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package misc
+
+import (
+ "context"
+ _ "embed"
+ "flag"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+ clusterInstance *cluster.LocalProcessCluster
+ vtParams mysql.ConnParams
+ keyspaceName = "ks"
+ cell = "test"
+
+ //go:embed schema.sql
+ schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+ defer cluster.PanicHandler(nil)
+ flag.Parse()
+
+ exitCode := func() int {
+ clusterInstance = cluster.NewCluster(cell, "localhost")
+ defer clusterInstance.Teardown()
+
+ // Start topo server
+ err := clusterInstance.StartTopo()
+ if err != nil {
+ return 1
+ }
+
+ // Start keyspace
+ keyspace := &cluster.Keyspace{
+ Name: keyspaceName,
+ SchemaSQL: schemaSQL,
+ }
+ err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false)
+ if err != nil {
+ return 1
+ }
+
+ // Start vtgate
+ clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
+ "--planner-version=gen4",
+ "--mysql_default_workload=olap")
+ err = clusterInstance.StartVtgate()
+ if err != nil {
+ return 1
+ }
+
+ vtParams = mysql.ConnParams{
+ Host: clusterInstance.Hostname,
+ Port: clusterInstance.VtgateMySQLPort,
+ }
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+/*
+TestStreamTxRestart tests that when a connection is killed by mysql (may be due to a restart),
+the transaction should not continue to serve the query via a reconnect.
+*/
+func TestStreamTxRestart(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &vtParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ utils.Exec(t, conn, "begin")
+ // BeginStreamExecute
+ _ = utils.Exec(t, conn, "select connection_id()")
+
+ // StreamExecute
+ _ = utils.Exec(t, conn, "select connection_id()")
+
+ // restart the mysql to terminate all the existing connections.
+ primTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
+ err = primTablet.MysqlctlProcess.Stop()
+ require.NoError(t, err)
+ err = primTablet.MysqlctlProcess.StartProvideInit(false)
+ require.NoError(t, err)
+
+ // query should return connection error
+ _, err = utils.ExecAllowError(t, conn, "select connection_id()")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "broken pipe (errno 2006) (sqlstate HY000)")
+}
diff --git a/go/test/endtoend/vtgate/transaction/restart/schema.sql b/go/test/endtoend/vtgate/transaction/restart/schema.sql
new file mode 100644
index 00000000000..3e78cab09d6
--- /dev/null
+++ b/go/test/endtoend/vtgate/transaction/restart/schema.sql
@@ -0,0 +1,5 @@
+create table t1(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vschema.json b/go/test/endtoend/vtgate/vschema.json
index 3aafd1106b5..8d16beec2a6 100644
--- a/go/test/endtoend/vtgate/vschema.json
+++ b/go/test/endtoend/vtgate/vschema.json
@@ -79,6 +79,16 @@
"to": "keyspace_id"
},
"owner": "t9"
+ },
+ "t10_id_to_keyspace_id_idx": {
+ "type": "lookup_unique",
+ "params": {
+ "autocommit": "true",
+ "table": "t10_id_to_keyspace_id_idx",
+ "from": "id",
+ "to": "keyspace_id"
+ },
+ "owner": "t10"
}
},
"tables": {
@@ -271,6 +281,26 @@
"name": "hash"
}
]
+ },
+ "t10": {
+ "column_vindexes": [
+ {
+ "column": "sharding_key",
+ "name": "hash"
+ },
+ {
+ "column": "id",
+ "name": "t10_id_to_keyspace_id_idx"
+ }
+ ]
+ },
+ "t10_id_to_keyspace_id_idx": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
}
}
}
diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go
index 87312004f7a..4885a67aa9c 100644
--- a/go/test/endtoend/vtorc/api/api_test.go
+++ b/go/test/endtoend/vtorc/api/api_test.go
@@ -107,10 +107,20 @@ func TestProblemsAPI(t *testing.T) {
assert.Equal(t, 200, status, resp)
assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort))
+ // Verify that filtering by keyspace also works in the API as intended
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks")
+ assert.Equal(t, 200, status, resp)
+ assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort))
+
// Check that filtering using keyspace and shard works
status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=80-")
assert.Equal(t, 200, status, resp)
assert.Equal(t, "[]", resp)
+
+ // Check that filtering using just the shard fails
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?shard=0")
+ assert.Equal(t, 400, status, resp)
+ assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp)
})
t.Run("Enable Recoveries API", func(t *testing.T) {
@@ -150,9 +160,19 @@ func TestProblemsAPI(t *testing.T) {
assert.Equal(t, 200, status, resp)
assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias))
+ // Check that filtering using keyspace works
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks")
+ assert.Equal(t, 200, status, resp)
+ assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias))
+
// Check that filtering using keyspace and shard works
status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks&shard=80-")
assert.Equal(t, 200, status, resp)
assert.Equal(t, "null", resp)
+
+ // Check that filtering using just the shard fails
+ status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?shard=0")
+ assert.Equal(t, 400, status, resp)
+ assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp)
})
}
diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go
index 4254606dd94..c0a845a5699 100644
--- a/go/test/endtoend/vtorc/general/vtorc_test.go
+++ b/go/test/endtoend/vtorc/general/vtorc_test.go
@@ -37,6 +37,7 @@ import (
// verify replication is setup
// verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call
func TestPrimaryElection(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -64,6 +65,7 @@ func TestPrimaryElection(t *testing.T) {
// verify rdonly is not elected, only replica
// verify replication is setup
func TestSingleKeyspace(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -81,6 +83,7 @@ func TestSingleKeyspace(t *testing.T) {
// verify rdonly is not elected, only replica
// verify replication is setup
func TestKeyspaceShard(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -100,6 +103,7 @@ func TestKeyspaceShard(t *testing.T) {
// 4. setup replication from non-primary, let vtorc repair
// 5. make instance A replicates from B and B from A, wait for repair
func TestVTOrcRepairs(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -216,6 +220,7 @@ func TestVTOrcRepairs(t *testing.T) {
func TestRepairAfterTER(t *testing.T) {
// test fails intermittently on CI, skip until it can be fixed.
t.SkipNow()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -252,6 +257,7 @@ func TestSemiSync(t *testing.T) {
// stop any vtorc instance running due to a previous test.
utils.StopVTOrcs(t, clusterInfo)
newCluster := utils.SetupNewClusterSemiSync(t)
+ defer utils.PrintVTOrcLogsOnFailure(t, newCluster.ClusterInstance)
utils.StartVTOrcs(t, newCluster, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
}, 1)
@@ -316,6 +322,7 @@ func TestSemiSync(t *testing.T) {
// TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld
func TestVTOrcWithPrs(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -364,6 +371,7 @@ func TestVTOrcWithPrs(t *testing.T) {
// TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies
func TestMultipleDurabilities(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
// Setup a normal cluster and start vtorc
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "")
@@ -388,6 +396,7 @@ func TestDurabilityPolicySetLater(t *testing.T) {
// stop any vtorc instance running due to a previous test.
utils.StopVTOrcs(t, clusterInfo)
newCluster := utils.SetupNewClusterSemiSync(t)
+ defer utils.PrintVTOrcLogsOnFailure(t, newCluster.ClusterInstance)
keyspace := &newCluster.ClusterInstance.Keyspaces[0]
shard0 := &keyspace.Shards[0]
// Before starting VTOrc we explicity want to set the durability policy of the keyspace to an empty string
diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go
index 8e9d622fd80..7d9c57b6b22 100644
--- a/go/test/endtoend/vtorc/primaryfailure/main_test.go
+++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go
@@ -21,9 +21,8 @@ import (
"os"
"testing"
- "vitess.io/vitess/go/test/endtoend/vtorc/utils"
-
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/vtorc/utils"
)
var clusterInfo *utils.VTOrcClusterInfo
@@ -34,7 +33,7 @@ func TestMain(m *testing.M) {
cellInfos = append(cellInfos, &utils.CellInfo{
CellName: utils.Cell1,
NumReplicas: 12,
- NumRdonly: 2,
+ NumRdonly: 3,
UIDBase: 100,
})
cellInfos = append(cellInfos, &utils.CellInfo{
diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go
index 01bf01782e7..0ac4129fd8b 100644
--- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go
+++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go
@@ -20,22 +20,26 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/test/endtoend/vtorc/utils"
- "vitess.io/vitess/go/vt/vtorc/logic"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
+ "vitess.io/vitess/go/test/endtoend/vtorc/utils"
+ "vitess.io/vitess/go/vt/vtorc/logic"
)
// bring down primary, let orc promote replica
// covers the test case master-failover from orchestrator
+// Also tests that VTOrc can handle multiple failures, if the durability policies allow it
func TestDownPrimary(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
- utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
+ // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test.
+ // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source.
+ // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test.
+ utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
- }, 1, "")
+ }, 1, "semi_sync")
keyspace := &clusterInfo.ClusterInstance.Keyspaces[0]
shard0 := &keyspace.Shards[0]
// find primary from topo
@@ -58,27 +62,39 @@ func TestDownPrimary(t *testing.T) {
assert.NotNil(t, replica, "could not find replica tablet")
assert.NotNil(t, rdonly, "could not find rdonly tablet")
+ // Start a cross-cell replica
+ crossCellReplica := utils.StartVttablet(t, clusterInfo, utils.Cell2, false)
+
// check that the replication is setup correctly before we failover
- utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica}, 10*time.Second)
+ utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica, crossCellReplica}, 10*time.Second)
- // Make the current primary database unavailable.
- err := curPrimary.MysqlctlProcess.Stop()
+ // Make the rdonly vttablet unavailable
+ err := rdonly.VttabletProcess.TearDown()
+ require.NoError(t, err)
+ err = rdonly.MysqlctlProcess.Stop()
+ require.NoError(t, err)
+ // Make the current primary vttablet unavailable.
+ err = curPrimary.VttabletProcess.TearDown()
+ require.NoError(t, err)
+ err = curPrimary.MysqlctlProcess.Stop()
require.NoError(t, err)
defer func() {
- // we remove the tablet from our global list since its mysqlctl process has stopped and cannot be reused for other tests
+ // we remove the tablet from our global list
utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary)
+ utils.PermanentlyRemoveVttablet(clusterInfo, rdonly)
}()
// check that the replica gets promoted
utils.CheckPrimaryTablet(t, clusterInfo, replica, true)
// also check that the replication is working correctly after failover
- utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{rdonly}, 10*time.Second)
+ utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica}, 10*time.Second)
utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1)
}
// Failover should not be cross data centers, according to the configuration file
// covers part of the test case master-failover-lost-replicas from orchestrator
func TestCrossDataCenterFailure(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -124,6 +140,7 @@ func TestCrossDataCenterFailure(t *testing.T) {
// Failover should not be cross data centers, according to the configuration file
// In case of no viable candidates, we should error out
func TestCrossDataCenterFailureError(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -170,6 +187,7 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) {
// Earlier any replicas that were not able to replicate from the previous primary
// were detected by vtorc and could be configured to have their sources detached
t.Skip()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{
PreventCrossDataCenterPrimaryFailover: true,
@@ -251,6 +269,7 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) {
// This test checks that the promotion of a tablet succeeds if it passes the promotion lag test
// covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator
func TestPromotionLagSuccess(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
ReplicationLagQuery: "select 59",
@@ -300,6 +319,7 @@ func TestPromotionLagFailure(t *testing.T) {
// Earlier vtorc used to check that the promotion lag between the new primary and the old one
// was smaller than the configured value, otherwise it would fail the promotion
t.Skip()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{
ReplicationLagQuery: "select 61",
@@ -352,6 +372,7 @@ func TestPromotionLagFailure(t *testing.T) {
// We explicitly set one of the replicas to Prefer promotion rule.
// That is the replica which should be promoted in case of primary failure
func TestDownPrimaryPromotionRule(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
LockShardTimeoutSeconds: 5,
@@ -399,6 +420,7 @@ func TestDownPrimaryPromotionRule(t *testing.T) {
// That is the replica which should be promoted in case of primary failure
// It should also be caught up when it is promoted
func TestDownPrimaryPromotionRuleWithLag(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
LockShardTimeoutSeconds: 5,
@@ -478,6 +500,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) {
// We let a replica in our own cell lag. That is the replica which should be promoted in case of primary failure
// It should also be caught up when it is promoted
func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) {
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
defer cluster.PanicHandler(t)
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
LockShardTimeoutSeconds: 5,
diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
index c6426021d6c..75ecbfd592c 100644
--- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
+++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
@@ -30,9 +30,9 @@ import (
"vitess.io/vitess/go/vt/vtorc/server"
_ "github.com/go-sql-driver/mysql"
- _ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ _ "modernc.org/sqlite"
)
func TestReadTopologyInstanceBufferable(t *testing.T) {
@@ -40,6 +40,7 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
defer func() {
clusterInfo.ClusterInstance.Teardown()
}()
+ defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance)
keyspace := &clusterInfo.ClusterInstance.Keyspaces[0]
shard0 := &keyspace.Shards[0]
oldArgs := os.Args
@@ -103,7 +104,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.False(t, primaryInstance.HasReplicationCredentials)
assert.Equal(t, primaryInstance.ReplicationIOThreadState, inst.ReplicationThreadStateNoThread)
assert.Equal(t, primaryInstance.ReplicationSQLThreadState, inst.ReplicationThreadStateNoThread)
- assert.Equal(t, fmt.Sprintf("%v:%v", keyspace.Name, shard0.Name), primaryInstance.ClusterName)
// insert an errant GTID in the replica
_, err = utils.RunSQL(t, "insert into vt_insert_test(id, msg) values (10173, 'test 178342')", replica, "vt_ks")
@@ -147,7 +147,7 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.Equal(t, replicaInstance.ReadBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile)
assert.Greater(t, replicaInstance.ReadBinlogCoordinates.LogPos, int64(0))
assert.Equal(t, replicaInstance.ExecBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile)
- assert.LessOrEqual(t, replicaInstance.ExecBinlogCoordinates.LogPos, replicaInstance.ReadBinlogCoordinates.LogPos)
+ assert.Greater(t, replicaInstance.ExecBinlogCoordinates.LogPos, int64(0))
assert.Contains(t, replicaInstance.RelaylogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-relay", replica.TabletUID))
assert.Greater(t, replicaInstance.RelaylogCoordinates.LogPos, int64(0))
assert.Empty(t, replicaInstance.LastIOError)
@@ -159,5 +159,4 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.False(t, replicaInstance.HasReplicationFilters)
assert.LessOrEqual(t, int(replicaInstance.SecondsBehindPrimary.Int64), 1)
assert.False(t, replicaInstance.AllowTLS)
- assert.Equal(t, fmt.Sprintf("%v:%v", keyspace.Name, shard0.Name), replicaInstance.ClusterName)
}
diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go
index 156c8f3728e..25d3a8445c6 100644
--- a/go/test/endtoend/vtorc/utils/utils.go
+++ b/go/test/endtoend/vtorc/utils/utils.go
@@ -29,21 +29,18 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- // This imports toposervers to register their implementations of TopoServer.
- _ "vitess.io/vitess/go/vt/topo/consultopo"
- _ "vitess.io/vitess/go/vt/topo/etcd2topo"
- _ "vitess.io/vitess/go/vt/topo/k8stopo"
- _ "vitess.io/vitess/go/vt/topo/zk2topo"
-
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/log"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
+ _ "vitess.io/vitess/go/vt/topo/consultopo"
+ _ "vitess.io/vitess/go/vt/topo/etcd2topo"
+ _ "vitess.io/vitess/go/vt/topo/k8stopo"
"vitess.io/vitess/go/vt/topo/topoproto"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ _ "vitess.io/vitess/go/vt/topo/zk2topo"
)
const (
@@ -647,7 +644,7 @@ func PermanentlyRemoveVttablet(clusterInfo *VTOrcClusterInfo, tablet *cluster.Vt
for i, vttablet := range cellInfo.RdonlyTablets {
if vttablet == tablet {
// remove this tablet since its mysql has stopped
- cellInfo.ReplicaTablets = append(cellInfo.ReplicaTablets[:i], cellInfo.ReplicaTablets[i+1:]...)
+ cellInfo.RdonlyTablets = append(cellInfo.RdonlyTablets[:i], cellInfo.RdonlyTablets[i+1:]...)
KillTablets([]*cluster.Vttablet{tablet})
return
}
@@ -947,3 +944,26 @@ func WaitForSuccessfulRecoveryCount(t *testing.T, vtorcInstance *cluster.VTOrcPr
successCount := successfulRecoveriesMap[recoveryName]
assert.EqualValues(t, countExpected, successCount)
}
+
+// PrintVTOrcLogsOnFailure prints the VTOrc logs on failure of the test.
+// This function is supposed to be called as the first defer command from the vtorc tests.
+func PrintVTOrcLogsOnFailure(t *testing.T, clusterInstance *cluster.LocalProcessCluster) {
+ // If the test has not failed, then we don't need to print anything.
+ if !t.Failed() {
+ return
+ }
+
+ log.Errorf("Printing VTOrc logs")
+ for _, vtorc := range clusterInstance.VTOrcProcesses {
+ if vtorc == nil || vtorc.LogFileName == "" {
+ continue
+ }
+ filePath := path.Join(vtorc.LogDir, vtorc.LogFileName)
+ log.Errorf("Printing file - %s", filePath)
+ content, err := os.ReadFile(filePath)
+ if err != nil {
+ log.Errorf("Error while reading the file - %v", err)
+ }
+ log.Errorf("%s", string(content))
+ }
+}
diff --git a/go/test/fuzzing/tablet_manager_fuzzer.go b/go/test/fuzzing/tablet_manager_fuzzer.go
index 0e6b6aaece7..316cf75fb82 100644
--- a/go/test/fuzzing/tablet_manager_fuzzer.go
+++ b/go/test/fuzzing/tablet_manager_fuzzer.go
@@ -22,7 +22,7 @@ import (
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/dbconfigs"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/vttablet/tabletmanager"
"vitess.io/vitess/go/vt/vttablet/tabletservermock"
@@ -42,7 +42,7 @@ func FuzzTabletManagerExecuteFetchAsDba(data []byte) int {
cp := mysql.ConnParams{}
db := fakesqldb.New(t)
db.AddQueryPattern(".*", &sqltypes.Result{})
- daemon := fakemysqldaemon.NewFakeMysqlDaemon(db)
+ daemon := mysqlctl.NewFakeMysqlDaemon(db)
dbName := "dbname"
tm := &tabletmanager.TabletManager{
diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go
index d51bdeb5fd4..09161b2b13f 100644
--- a/go/test/fuzzing/vtctl_fuzzer.go
+++ b/go/test/fuzzing/vtctl_fuzzer.go
@@ -170,8 +170,8 @@ func Fuzz(data []byte) int {
chunkSize := len(restOfArray) / numberOfCalls
command := 0
for i := 0; i < len(restOfArray); i = i + chunkSize {
- from := i //lower
- to := i + chunkSize //upper
+ from := i // lower
+ to := i + chunkSize // upper
// Index of command in getCommandType():
commandIndex := int(commandPart[command]) % 68
@@ -180,9 +180,7 @@ func Fuzz(data []byte) int {
args := strings.Split(string(restOfArray[from:to]), " ")
// Add params to the command
- for i := range args {
- commandSlice = append(commandSlice, args[i])
- }
+ commandSlice = append(commandSlice, args...)
_ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), commandSlice)
command++
diff --git a/go/tools/ci-config/main.go b/go/tools/ci-config/main.go
new file mode 100644
index 00000000000..d767b6f4d32
--- /dev/null
+++ b/go/tools/ci-config/main.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+)
+
+type Test struct {
+ Args []string
+}
+
+type Config struct {
+ Tests map[string]*Test
+}
+
+func main() {
+ content, err := os.ReadFile("./test/config.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ tests := &Config{}
+ err = json.Unmarshal(content, tests)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var failedConfig []string
+ for name, test := range tests.Tests {
+ if len(test.Args) == 0 {
+ continue
+ }
+ path := test.Args[0]
+ if !strings.HasPrefix(path, "vitess.io/vitess/") {
+ continue
+ }
+ path = path[len("vitess.io/vitess/"):]
+
+ stat, err := os.Stat(path)
+ if err != nil || !stat.IsDir() {
+ failedConfig = append(failedConfig, fmt.Sprintf("%s: %s", name, path))
+ continue
+ }
+ }
+
+ if len(failedConfig) > 0 {
+ fmt.Println("Some packages in test/config.json were not found in the codebase:")
+ for _, failed := range failedConfig {
+ fmt.Println("\t" + failed)
+ }
+ fmt.Println("\nYou must remove them from test/config.json to avoid unnecessary CI load.")
+ os.Exit(1)
+ }
+ fmt.Println("The file: test/config.json is clean.")
+}
diff --git a/go/tools/go-upgrade/go-upgrade.go b/go/tools/go-upgrade/go-upgrade.go
new file mode 100644
index 00000000000..0cd8a945745
--- /dev/null
+++ b/go/tools/go-upgrade/go-upgrade.go
@@ -0,0 +1,531 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "encoding/json"
+
+ "github.com/hashicorp/go-version"
+ "github.com/spf13/cobra"
+)
+
+const (
+ goDevAPI = "https://go.dev/dl/?mode=json"
+)
+
+type (
+ latestGolangRelease struct {
+ Version string `json:"version"`
+ Stable bool `json:"stable"`
+ }
+
+ bootstrapVersion struct {
+ major, minor int // when minor == -1, it means there is no minor version
+ }
+)
+
+var (
+ workflowUpdate = true
+ allowMajorUpgrade = false
+ isMainBranch = false
+ goTo = ""
+
+ rootCmd = &cobra.Command{
+ Use: "go-upgrade",
+ Short: "Automates the Golang upgrade.",
+ Long: `go-upgrade allows us to automate some tasks required to bump the version of Golang used throughout our codebase.
+
+It is mostly used by the update_golang_version.yml CI workflow that runs on a CRON.
+
+This tool is meant to be run at the root of the repository.
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ _ = cmd.Help()
+ },
+ Args: cobra.NoArgs,
+ }
+
+ getCmd = &cobra.Command{
+ Use: "get",
+ Short: "Command to get useful information about the codebase.",
+ Long: "Command to get useful information about the codebase.",
+ Run: func(cmd *cobra.Command, args []string) {
+ _ = cmd.Help()
+ },
+ Args: cobra.NoArgs,
+ }
+
+ getGoCmd = &cobra.Command{
+ Use: "go-version",
+ Short: "go-version prints the Golang version used by the current codebase.",
+ Long: "go-version prints the Golang version used by the current codebase.",
+ Run: runGetGoCmd,
+ Args: cobra.NoArgs,
+ }
+
+ getBootstrapCmd = &cobra.Command{
+ Use: "bootstrap-version",
+ Short: "bootstrap-version prints the Docker Bootstrap version used by the current codebase.",
+ Long: "bootstrap-version prints the Docker Bootstrap version used by the current codebase.",
+ Run: runGetBootstrapCmd,
+ Args: cobra.NoArgs,
+ }
+
+ upgradeCmd = &cobra.Command{
+ Use: "upgrade",
+ Short: "upgrade will upgrade the Golang and Bootstrap versions of the codebase to the latest available version.",
+ Long: `This command bumps the Golang and Bootstrap versions of the codebase.
+
+The latest available version of Golang will be fetched and used instead of the old version.
+
+By default, we do not allow major Golang version upgrade such as 1.20 to 1.21 but this can be overridden using the
+--allow-major-upgrade CLI flag. Usually, we only allow such upgrade on the main branch of the repository.
+
+In CI, particularly, we do not want to modify the workflow files before automatically creating a Pull Request to
+avoid permission issues. The rewrite of workflow files can be disabled using the --workflow-update=false CLI flag.
+
+Moreover, this command automatically bumps the bootstrap version of our codebase. If we are on the main branch, we
+want to use the CLI flag --main to remember to increment the bootstrap version by 1 instead of 0.1.`,
+ Run: runUpgradeCmd,
+ Args: cobra.NoArgs,
+ }
+
+ upgradeWorkflowsCmd = &cobra.Command{
+ Use: "workflows",
+ Short: "workflows will upgrade the Golang version used in our CI workflows files.",
+ Long: "This step is omitted by the bot since we let the maintainers of Vitess manually upgrade the version used by the workflows using this command.",
+ Run: runUpgradeWorkflowsCmd,
+ Args: cobra.NoArgs,
+ }
+)
+
+func init() {
+ rootCmd.AddCommand(getCmd)
+ rootCmd.AddCommand(upgradeCmd)
+
+ getCmd.AddCommand(getGoCmd)
+ getCmd.AddCommand(getBootstrapCmd)
+
+ upgradeCmd.AddCommand(upgradeWorkflowsCmd)
+
+ upgradeCmd.Flags().BoolVar(&workflowUpdate, "workflow-update", workflowUpdate, "Whether or not the workflow files should be updated. Useful when using this script to auto-create PRs.")
+ upgradeCmd.Flags().BoolVar(&allowMajorUpgrade, "allow-major-upgrade", allowMajorUpgrade, "Defines if Golang major version upgrade are allowed.")
+ upgradeCmd.Flags().BoolVar(&isMainBranch, "main", isMainBranch, "Defines if the current branch is the main branch.")
+
+ upgradeWorkflowsCmd.Flags().StringVar(&goTo, "go-to", goTo, "The Golang version we want to upgrade to.")
+}
+
+func main() {
+ cobra.CheckErr(rootCmd.Execute())
+}
+
+func runGetGoCmd(_ *cobra.Command, _ []string) {
+ currentVersion, err := currentGolangVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(currentVersion.String())
+}
+
+func runGetBootstrapCmd(_ *cobra.Command, _ []string) {
+ currentVersion, err := currentBootstrapVersion()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(currentVersion.toString())
+}
+
+func runUpgradeWorkflowsCmd(_ *cobra.Command, _ []string) {
+ err := updateWorkflowFilesOnly(goTo)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func runUpgradeCmd(_ *cobra.Command, _ []string) {
+ err := upgradePath(allowMajorUpgrade, workflowUpdate, isMainBranch)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func updateWorkflowFilesOnly(goTo string) error {
+ newV, err := version.NewVersion(goTo)
+ if err != nil {
+ return err
+ }
+ filesToChange, err := getListOfFilesInPaths([]string{"./.github/workflows"})
+ if err != nil {
+ return err
+ }
+
+ for _, fileToChange := range filesToChange {
+ err = replaceInFile(
+ []*regexp.Regexp{regexp.MustCompile(`go-version:[[:space:]]*([0-9.]+).*`)},
+ []string{"go-version: " + newV.String()},
+ fileToChange,
+ )
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func upgradePath(allowMajorUpgrade, workflowUpdate, isMainBranch bool) error {
+ currentVersion, err := currentGolangVersion()
+ if err != nil {
+ return err
+ }
+
+ availableVersions, err := getLatestStableGolangReleases()
+ if err != nil {
+ return err
+ }
+
+ upgradeTo := chooseNewVersion(currentVersion, availableVersions, allowMajorUpgrade)
+ if upgradeTo == nil {
+ return nil
+ }
+
+ err = replaceGoVersionInCodebase(currentVersion, upgradeTo, workflowUpdate)
+ if err != nil {
+ return err
+ }
+
+ currentBootstrapVersionF, err := currentBootstrapVersion()
+ if err != nil {
+ return err
+ }
+ nextBootstrapVersionF := currentBootstrapVersionF
+ if isMainBranch {
+ nextBootstrapVersionF.major += 1
+ } else {
+ nextBootstrapVersionF.minor += 1
+ }
+ err = updateBootstrapVersionInCodebase(currentBootstrapVersionF.toString(), nextBootstrapVersionF.toString(), upgradeTo)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// currentGolangVersion gets the running version of Golang in Vitess
+// and returns it as a *version.Version.
+//
+// The file `./build.env` describes which version of Golang is expected by Vitess.
+// We use this file to detect the current Golang version of our codebase.
+// The file contains `goversion_min x.xx.xx`, we will grep `goversion_min` to finally find
+// the precise golang version we're using.
+func currentGolangVersion() (*version.Version, error) {
+ contentRaw, err := os.ReadFile("build.env")
+ if err != nil {
+ return nil, err
+ }
+ content := string(contentRaw)
+
+ versre := regexp.MustCompile("(?i).*goversion_min[[:space:]]*([0-9.]+).*")
+ versionStr := versre.FindStringSubmatch(content)
+ if len(versionStr) != 2 {
+ return nil, fmt.Errorf("malformatted error, got: %v", versionStr)
+ }
+ return version.NewVersion(versionStr[1])
+}
+
+// currentBootstrapVersion reads the current bootstrap version from the
+// Makefile, where it is declared as `BOOTSTRAP_VERSION = x[.y]`.
+// The minor part is optional; when absent it is recorded as -1 so that
+// toString can render the version exactly as it was found.
+func currentBootstrapVersion() (bootstrapVersion, error) {
+	contentRaw, err := os.ReadFile("Makefile")
+	if err != nil {
+		return bootstrapVersion{}, err
+	}
+	content := string(contentRaw)
+
+	versre := regexp.MustCompile("(?i).*BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*([0-9.]+).*")
+	versionStr := versre.FindStringSubmatch(content)
+	if len(versionStr) != 2 {
+		// A missing match means the Makefile no longer declares BOOTSTRAP_VERSION.
+		return bootstrapVersion{}, fmt.Errorf("malformatted Makefile, could not find 'BOOTSTRAP_VERSION', got: %v", versionStr)
+	}
+
+	vs := strings.Split(versionStr[1], ".")
+	major, err := strconv.Atoi(vs[0])
+	if err != nil {
+		return bootstrapVersion{}, err
+	}
+
+	// -1 marks "no minor component" (e.g. BOOTSTRAP_VERSION = 14).
+	minor := -1
+	if len(vs) > 1 {
+		minor, err = strconv.Atoi(vs[1])
+		if err != nil {
+			return bootstrapVersion{}, err
+		}
+	}
+
+	return bootstrapVersion{
+		major: major,
+		minor: minor,
+	}, nil
+}
+
+// getLatestStableGolangReleases fetches the latest stable releases of Golang from
+// the official website using the goDevAPI URL.
+// Once fetched, the releases are returned as version.Collection.
+func getLatestStableGolangReleases() (version.Collection, error) {
+	resp, err := http.Get(goDevAPI)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// Guard against decoding an HTML error page as JSON.
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("failed to fetch golang releases, got HTTP status: %d", resp.StatusCode)
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var latestGoReleases []latestGolangRelease
+	err = json.Unmarshal(body, &latestGoReleases)
+	if err != nil {
+		return nil, err
+	}
+
+	var versions version.Collection
+	for _, release := range latestGoReleases {
+		if !release.Stable {
+			continue
+		}
+		// Versions are reported as "goX.Y.Z"; strip the "go" prefix before parsing.
+		if !strings.HasPrefix(release.Version, "go") {
+			return nil, fmt.Errorf("golang version malformatted: %s", release.Version)
+		}
+		newVersion, err := version.NewVersion(release.Version[2:])
+		if err != nil {
+			return nil, err
+		}
+		versions = append(versions, newVersion)
+	}
+	return versions, nil
+}
+
+// chooseNewVersion decides what will be the next version we're going to use in our codebase.
+// Given the current Golang version, the available latest versions and whether we allow major upgrade or not,
+// chooseNewVersion will return either the new version or nil if we cannot/don't need to upgrade.
+func chooseNewVersion(curVersion *version.Version, latestVersions version.Collection, allowMajorUpgrade bool) *version.Version {
+	best := curVersion
+	for _, candidate := range latestVersions {
+		// Without major upgrades, only candidates on the same major.minor line qualify.
+		if !allowMajorUpgrade && !isSameMajorMinorVersion(candidate, best) {
+			continue
+		}
+		if candidate.GreaterThan(best) {
+			best = candidate
+		}
+	}
+	if best.Equal(curVersion) {
+		// Nothing newer was found: nil signals that no upgrade is needed.
+		return nil
+	}
+	return best
+}
+
+// replaceGoVersionInCodebase goes through all the files in the codebase where the
+// Golang version must be updated, replacing occurrences of the old version string
+// with the new one. Workflow files are only touched when workflowUpdate is set.
+func replaceGoVersionInCodebase(old, new *version.Version, workflowUpdate bool) error {
+	if old.Equal(new) {
+		return nil
+	}
+	explore := []string{
+		"./test/templates",
+		"./build.env",
+		"./docker/bootstrap/Dockerfile.common",
+	}
+	if workflowUpdate {
+		explore = append(explore, "./.github/workflows")
+	}
+	filesToChange, err := getListOfFilesInPaths(explore)
+	if err != nil {
+		return err
+	}
+
+	for _, fileToChange := range filesToChange {
+		err = replaceInFile(
+			// QuoteMeta escapes the dots of the version string, otherwise the
+			// regexp "1.19.1" would also match e.g. "1x19x1".
+			[]*regexp.Regexp{regexp.MustCompile(fmt.Sprintf(`(%s)`, regexp.QuoteMeta(old.String())))},
+			[]string{new.String()},
+			fileToChange,
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	// A major/minor bump also requires updating the `go x.y` directive in go.mod.
+	if !isSameMajorMinorVersion(old, new) {
+		err = replaceInFile(
+			[]*regexp.Regexp{regexp.MustCompile(`go[[:space:]]*([0-9.]+)`)},
+			[]string{fmt.Sprintf("go %d.%d", new.Segments()[0], new.Segments()[1])},
+			"./go.mod",
+		)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// updateBootstrapVersionInCodebase replaces the old bootstrap version with the
+// new one everywhere it appears: the Dockerfiles, the Makefile, test.go's
+// default flag value, and the bootstrap CHANGELOG. newGoVersion is only used
+// to mention the matching Golang version in the changelog entry.
+func updateBootstrapVersionInCodebase(old, new string, newGoVersion *version.Version) error {
+	if old == new {
+		return nil
+	}
+	files, err := getListOfFilesInPaths([]string{
+		"./docker/base",
+		"./docker/lite",
+		"./docker/local",
+		"./docker/vttestserver",
+		"./Makefile",
+		"./test/templates",
+	})
+	if err != nil {
+		return err
+	}
+
+	// Both patterns are applied to every file; at most one of them matches
+	// depending on whether the file is a Dockerfile or the Makefile.
+	for _, file := range files {
+		err = replaceInFile(
+			[]*regexp.Regexp{
+				regexp.MustCompile(`ARG[[:space:]]*bootstrap_version[[:space:]]*=[[:space:]]*[0-9.]+`), // Dockerfile
+				regexp.MustCompile(`BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*[0-9.]+`),                // Makefile
+			},
+			[]string{
+				fmt.Sprintf("ARG bootstrap_version=%s", new), // Dockerfile
+				fmt.Sprintf("BOOTSTRAP_VERSION=%s", new),     // Makefile
+			},
+			file,
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	// test.go declares a "bootstrap-version" flag whose default must track the new version.
+	err = replaceInFile(
+		[]*regexp.Regexp{regexp.MustCompile(`\"bootstrap-version\",[[:space:]]*\"([0-9.]+)\"`)},
+		[]string{fmt.Sprintf("\"bootstrap-version\", \"%s\"", new)},
+		"./test.go",
+	)
+	if err != nil {
+		return err
+	}
+
+	err = updateBootstrapChangelog(new, newGoVersion)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// updateBootstrapChangelog appends a new entry to the bootstrap CHANGELOG
+// for the given bootstrap version, recording today's date and the Golang
+// version the bootstrap image was built with.
+func updateBootstrapChangelog(new string, goVersion *version.Version) error {
+	file, err := os.OpenFile("./docker/bootstrap/CHANGELOG.md", os.O_RDWR, 0600)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	// Stat gives us the current size so the new entry can be appended at the end.
+	s, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	newContent := fmt.Sprintf(`
+
+## [%s] - %s
+### Changes
+- Update build to golang %s`, new, time.Now().Format("2006-01-02"), goVersion.String())
+
+	_, err = file.WriteAt([]byte(newContent), s.Size())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// isSameMajorMinorVersion reports whether a and b share the same
+// major and minor version numbers (patch level is ignored).
+func isSameMajorMinorVersion(a, b *version.Version) bool {
+	segA, segB := a.Segments(), b.Segments()
+	return segA[0] == segB[0] && segA[1] == segB[1]
+}
+
+// getListOfFilesInPaths expands the given paths into a flat list of files:
+// a plain file is kept as-is, a directory contributes its immediate
+// (non-directory) entries. It does not recurse into nested directories.
+func getListOfFilesInPaths(pathsToExplore []string) ([]string, error) {
+	var files []string
+	for _, p := range pathsToExplore {
+		info, err := os.Stat(p)
+		if err != nil {
+			return nil, err
+		}
+		if !info.IsDir() {
+			files = append(files, p)
+			continue
+		}
+		entries, err := os.ReadDir(p)
+		if err != nil {
+			return nil, err
+		}
+		for _, entry := range entries {
+			// Only first-level files are collected; sub-directories are skipped.
+			if entry.IsDir() {
+				continue
+			}
+			files = append(files, path.Join(p, entry.Name()))
+		}
+	}
+	return files, nil
+}
+
+// replaceInFile replaces old with new in the given file.
+// oldexps and new are parallel slices: each regexp is replaced by the
+// string at the same index. The file is rewritten in place.
+func replaceInFile(oldexps []*regexp.Regexp, new []string, fileToChange string) error {
+	if len(oldexps) != len(new) {
+		panic("old and new should be of the same length")
+	}
+
+	f, err := os.OpenFile(fileToChange, os.O_RDWR, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	content, err := io.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	contentStr := string(content)
+
+	for i, oldex := range oldexps {
+		contentStr = oldex.ReplaceAllString(contentStr, new[i])
+	}
+
+	_, err = f.WriteAt([]byte(contentStr), 0)
+	if err != nil {
+		return err
+	}
+	// Shrink the file to the new content's length: WriteAt alone would leave
+	// stale trailing bytes behind whenever the replacement is shorter than
+	// the original content.
+	return f.Truncate(int64(len(contentStr)))
+}
+
+// toString renders the bootstrap version, omitting the minor component
+// when it is absent (recorded as -1 by currentBootstrapVersion).
+func (b bootstrapVersion) toString() string {
+	if b.minor < 0 {
+		return fmt.Sprintf("%d", b.major)
+	}
+	return fmt.Sprintf("%d.%d", b.major, b.minor)
+}
diff --git a/go/tools/release-notes/release_notes.go b/go/tools/release-notes/release_notes.go
index 61d899f370e..5bb03339245 100644
--- a/go/tools/release-notes/release_notes.go
+++ b/go/tools/release-notes/release_notes.go
@@ -27,7 +27,6 @@ import (
"regexp"
"sort"
"strings"
- "sync"
"text/template"
"github.com/spf13/pflag"
@@ -40,24 +39,24 @@ type (
labels []label
- author struct {
- Login string `json:"login"`
+ pullRequestAuthor struct {
+ Login string
}
- prInfo struct {
- Labels labels `json:"labels"`
- Number int `json:"number"`
- Title string `json:"title"`
- Author author `json:"author"`
+ pullRequestInformation struct {
+ Number int
+ Title string
+ Labels labels
+ Author pullRequestAuthor
}
- prsByComponent = map[string][]prInfo
+ prsByComponent = map[string][]pullRequestInformation
prsByType = map[string]prsByComponent
sortedPRComponent struct {
Name string
- PrInfos []prInfo
+ PrInfos []pullRequestInformation
}
sortedPRType struct {
@@ -76,14 +75,17 @@ type (
KnownIssues string
AddDetails string
PathToChangeLogFileOnGH, ChangeLog, ChangeMetrics string
+ SubDirPath string
}
)
-const (
- releaseNotesPath = `doc/releasenotes/`
- releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/` + releaseNotesPath
+var (
+ releaseNotesPath = `changelog/`
+)
- markdownTemplate = `# Release of Vitess {{.Version}}
+const (
+ releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/`
+ markdownTemplate = `# Release of Vitess {{.Version}}
{{- if or .Announcement .AddDetails }}
{{ .Announcement }}
@@ -131,16 +133,15 @@ The entire changelog for this release can be found [here]({{ .PathToChangeLogFil
prefixType = "Type: "
prefixComponent = "Component: "
- numberOfThreads = 10
lengthOfSingleSHA = 40
)
func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error {
var err error
// Generate the release notes
- rn.PathToChangeLogFileOnGH = fmt.Sprintf(releaseNotesPathGitHub+"%s_changelog.md", rn.VersionUnderscore)
+ rn.PathToChangeLogFileOnGH = releaseNotesPathGitHub + path.Join(rn.SubDirPath, "changelog.md")
if rnFile == nil {
- rnFile, err = os.OpenFile(fmt.Sprintf(path.Join(releaseNotesPath, "%s_release_notes.md"), rn.VersionUnderscore), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ rnFile, err = os.OpenFile(path.Join(rn.SubDirPath, "release_notes.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
@@ -154,7 +155,7 @@ func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error {
// Generate the changelog
if changelogFile == nil {
- changelogFile, err = os.OpenFile(fmt.Sprintf(path.Join(releaseNotesPath, "%s_changelog.md"), rn.VersionUnderscore), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ changelogFile, err = os.OpenFile(path.Join(rn.SubDirPath, "changelog.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
@@ -185,61 +186,27 @@ func loadKnownIssues(release string) ([]knownIssue, error) {
return knownIssues, nil
}
-func loadMergedPRs(from, to string) (prs []string, authors []string, commitCount int, err error) {
- // load the git log with "author \t title \t parents"
- out, err := execCmd("git", "log", `--pretty=format:%ae%x09%s%x09%P%x09%h`, fmt.Sprintf("%s..%s", from, to))
-
+func loadMergedPRsAndAuthors(name string) (pris []pullRequestInformation, authors []string, err error) {
+ out, err := execCmd("gh", "pr", "list", "-s", "merged", "-S", fmt.Sprintf("milestone:%s", name), "--json", "number,title,labels,author", "--limit", "5000")
if err != nil {
return
}
- return parseGitLog(string(out))
-}
-
-func parseGitLog(s string) (prs []string, authorCommits []string, commitCount int, err error) {
- rx := regexp.MustCompile(`(.+)\t(.+)\t(.+)\t(.+)`)
- mergePR := regexp.MustCompile(`Merge pull request #(\d+)`)
- squashPR := regexp.MustCompile(`\(#(\d+)\)`)
- authMap := map[string]string{} // here we will store email <-> gh user mappings
- lines := strings.Split(s, "\n")
- for _, line := range lines {
- lineInfo := rx.FindStringSubmatch(line)
- if len(lineInfo) != 5 {
- log.Fatalf("failed to parse the output from git log: %s", line)
- }
- authorEmail := lineInfo[1]
- title := lineInfo[2]
- parents := lineInfo[3]
- sha := lineInfo[4]
- merged := mergePR.FindStringSubmatch(title)
- if len(merged) == 2 {
- // this is a merged PR. remember the PR #
- prs = append(prs, merged[1])
- continue
- }
-
- if len(parents) <= lengthOfSingleSHA {
- // we have a single parent, and the commit counts
- commitCount++
- if _, exists := authMap[authorEmail]; !exists {
- authMap[authorEmail] = sha
- }
- }
-
- squashed := squashPR.FindStringSubmatch(title)
- if len(squashed) == 2 {
- // this is a merged PR. remember the PR #
- prs = append(prs, squashed[1])
- continue
- }
+ err = json.Unmarshal(out, &pris)
+ if err != nil {
+ return nil, nil, err
}
- for _, author := range authMap {
- authorCommits = append(authorCommits, author)
+ // Get the full list of distinct PRs authors and sort them
+ authorMap := map[string]bool{}
+ for _, pri := range pris {
+ login := pri.Author.Login
+ if ok := authorMap[login]; !ok {
+ authors = append(authors, login)
+ authorMap[login] = true
+ }
}
-
- sort.Strings(prs)
- sort.Strings(authorCommits) // not really needed, but makes testing easier
+ sort.Strings(authors)
return
}
@@ -259,134 +226,10 @@ func execCmd(name string, arg ...string) ([]byte, error) {
return out, nil
}
-func loadPRInfo(pr string) (prInfo, error) {
- out, err := execCmd("gh", "pr", "view", pr, "--json", "title,number,labels,author")
- if err != nil {
- return prInfo{}, err
- }
- var prInfo prInfo
- err = json.Unmarshal(out, &prInfo)
- return prInfo, err
-}
-
-func loadAuthorInfo(sha string) (string, error) {
- out, err := execCmd("gh", "api", "/repos/vitessio/vitess/commits/"+sha)
- if err != nil {
- return "", err
- }
- var prInfo prInfo
- err = json.Unmarshal(out, &prInfo)
- if err != nil {
- return "", err
- }
- return prInfo.Author.Login, nil
-}
-
-type req struct {
- isPR bool
- key string
-}
-
-func loadAllPRs(prs, authorCommits []string) ([]prInfo, []string, error) {
- errChan := make(chan error)
- wgDone := make(chan bool)
- prChan := make(chan req, len(prs)+len(authorCommits))
- // fill the work queue
- for _, s := range prs {
- prChan <- req{isPR: true, key: s}
- }
- for _, s := range authorCommits {
- prChan <- req{isPR: false, key: s}
- }
- close(prChan)
-
- var prInfos []prInfo
- var authors []string
- fmt.Printf("Found %d merged PRs. Loading PR info", len(prs))
- wg := sync.WaitGroup{}
- mu := sync.Mutex{}
-
- shouldLoad := func(in string) bool {
- if in == "" {
- return false
- }
- mu.Lock()
- defer mu.Unlock()
-
- for _, existing := range authors {
- if existing == in {
- return false
- }
- }
- return true
- }
- addAuthor := func(in string) {
- mu.Lock()
- defer mu.Unlock()
- authors = append(authors, in)
- }
- addPR := func(in prInfo) {
- mu.Lock()
- defer mu.Unlock()
- prInfos = append(prInfos, in)
- }
-
- for i := 0; i < numberOfThreads; i++ {
- wg.Add(1)
- go func() {
- // load meta data about PRs
- defer wg.Done()
-
- for b := range prChan {
- fmt.Print(".")
-
- if b.isPR {
- prInfo, err := loadPRInfo(b.key)
- if err != nil {
- errChan <- err
- break
- }
- addPR(prInfo)
- continue
- }
- author, err := loadAuthorInfo(b.key)
- if err != nil {
- errChan <- err
- break
- }
- if shouldLoad(author) {
- addAuthor(author)
- }
-
- }
- }()
- }
-
- go func() {
- // wait for the loading to finish
- wg.Wait()
- close(wgDone)
- }()
-
- var err error
- select {
- case <-wgDone:
- break
- case err = <-errChan:
- break
- }
-
- fmt.Println()
-
- sort.Strings(authors)
-
- return prInfos, authors, err
-}
-
-func groupPRs(prInfos []prInfo) prsByType {
+func groupPRs(pris []pullRequestInformation) prsByType {
prPerType := prsByType{}
- for _, info := range prInfos {
+ for _, info := range pris {
var typ, component string
for _, lbl := range info.Labels {
switch {
@@ -476,11 +319,11 @@ func getStringForKnownIssues(issues []knownIssue) (string, error) {
return buff.String(), nil
}
-func groupAndStringifyPullRequest(pr []prInfo) (string, error) {
- if len(pr) == 0 {
+func groupAndStringifyPullRequest(pris []pullRequestInformation) (string, error) {
+ if len(pris) == 0 {
return "", nil
}
- prPerType := groupPRs(pr)
+ prPerType := groupPRs(pris)
prStr, err := getStringForPullRequestInfos(prPerType)
if err != nil {
return "", err
@@ -490,11 +333,8 @@ func groupAndStringifyPullRequest(pr []prInfo) (string, error) {
func main() {
var (
- from, versionName, summaryFile string
- to = "HEAD"
+ versionName, summaryFile string
)
- pflag.StringVarP(&from, "from", "f", "", "from sha/tag/branch")
- pflag.StringVarP(&to, "to", to, "t", "to sha/tag/branch")
pflag.StringVarP(&versionName, "version", "v", "", "name of the version (has to be the following format: v11.0.0)")
pflag.StringVarP(&summaryFile, "summary", "s", "", "readme file on which there is a summary of the release")
pflag.Parse()
@@ -507,9 +347,20 @@ func main() {
log.Fatal("The --version flag must be set using a valid format. Format: 'vX.X.X'.")
}
+ // Define the path to the release notes folder
+ majorVersion := versionMatch[1] + "." + versionMatch[2]
+ patchVersion := versionMatch[1] + "." + versionMatch[2] + "." + versionMatch[3]
+ releaseNotesPath = path.Join(releaseNotesPath, majorVersion, patchVersion)
+
+ err := os.MkdirAll(releaseNotesPath, os.ModePerm)
+ if err != nil {
+ log.Fatal(err)
+ }
+
releaseNotes := releaseNote{
Version: versionName,
VersionUnderscore: fmt.Sprintf("%s_%s_%s", versionMatch[1], versionMatch[2], versionMatch[3]), // v14.0.0 -> 14_0_0, this is used to format filenames.
+ SubDirPath: releaseNotesPath,
}
// summary of the release
@@ -533,26 +384,23 @@ func main() {
releaseNotes.KnownIssues = knownIssuesStr
// changelog with pull requests
- prs, authorCommits, commits, err := loadMergedPRs(from, to)
+ prs, authors, err := loadMergedPRsAndAuthors(versionName)
if err != nil {
log.Fatal(err)
}
- prInfos, authors, err := loadAllPRs(prs, authorCommits)
- if err != nil {
- log.Fatal(err)
- }
- releaseNotes.ChangeLog, err = groupAndStringifyPullRequest(prInfos)
+
+ releaseNotes.ChangeLog, err = groupAndStringifyPullRequest(prs)
if err != nil {
log.Fatal(err)
}
// changelog metrics
- if commits > 0 && len(authors) > 0 {
+ if len(prs) > 0 && len(authors) > 0 {
releaseNotes.ChangeMetrics = fmt.Sprintf(`
-The release includes %d commits (excluding merges)
+The release includes %d merged Pull Requests.
Thanks to all our contributors: @%s
-`, commits, strings.Join(authors, ", @"))
+`, len(prs), strings.Join(authors, ", @"))
}
if err := releaseNotes.generate(nil, nil); err != nil {
diff --git a/go/tools/release-notes/release_notes_test.go b/go/tools/release-notes/release_notes_test.go
index 0622d458d28..19f946525c3 100644
--- a/go/tools/release-notes/release_notes_test.go
+++ b/go/tools/release-notes/release_notes_test.go
@@ -20,7 +20,6 @@ import (
"os"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/utils"
@@ -29,26 +28,26 @@ import (
func Test_groupPRs(t *testing.T) {
tests := []struct {
name string
- prInfos []prInfo
- want map[string]map[string][]prInfo
+ prInfos []pullRequestInformation
+ want map[string]map[string][]pullRequestInformation
}{
{
name: "Single PR info with no labels",
- prInfos: []prInfo{{Title: "pr 1", Number: 1}},
- want: map[string]map[string][]prInfo{"Other": {"Other": []prInfo{{Title: "pr 1", Number: 1}}}},
+ prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1}},
+ want: map[string]map[string][]pullRequestInformation{"Other": {"Other": []pullRequestInformation{{Title: "pr 1", Number: 1}}}},
}, {
name: "Single PR info with type label",
- prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}},
- want: map[string]map[string][]prInfo{"Bug fixes": {"Other": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}},
+ prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}},
+ want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"Other": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}},
{
name: "Single PR info with type and component labels",
- prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}},
- want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}},
+ prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}},
+ want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"VTGate": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}},
{
- name: "Multiple PR infos with type and component labels", prInfos: []prInfo{
+ name: "Multiple PR infos with type and component labels", prInfos: []pullRequestInformation{
{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}},
{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}},
- want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []prInfo{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}},
+ want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"VTGate": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []pullRequestInformation{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -58,54 +57,6 @@ func Test_groupPRs(t *testing.T) {
}
}
-func TestParseGitLogOutput(t *testing.T) {
- in := `harshTEST@planetscale.com Merge pull request #7968 from planetscale/bump_java_snapshot_v11 7e8ebbb5b79b65d2d45fd6c838efb51bdafc7c0b 195a09df191d3e86a32ebcc7a1f1dde168fe819e 168fe819e
-deeptTEST@planetscale.com Merge pull request #7970 from planetscale/vttestserver-default-charset 887be6914690b6d106aba001c72deea80a4d8dab ff8c750eda4b30787e772547a451ed1f50931150 f50931150
-deeptTEST@planetscale.com Merge pull request #7943 from planetscale/fix-mysql80-container-image 01fb7e55ab92df7c3f300b85976fdf3fd5bd35b3 3cc94a10752014c9ce311d88af9e1aa18e7fa2d8 18e7fa2d8
-57520317+rohit-nayak-TEST@users.noreply.github.com Merge pull request #7831 from planetscale/rn-vr-log2 37c09d3be83922a8ef936fbc028a5031f96b7dbf f57350c3ea1720496e5f1cec35d58f069e4df515 69e4df515
-TEST@planetscale.com docker/vttestserver/run.sh: Add $CHARSET environment variable 482a7008117ee3215663aeb33cad981e5242a88a e5242a88a
-rohTEST@planetscale.com Add ability to select from vreplication_log in VReplicationExec 427cac89cd6b143d3a1928ee682b3a9538709da5 538709da5
-rohTEST@planetscale.com Use withDDL for vreplication log queries 4a1ab946e3628ba8ef610ea4a158186a5fdd17ba a5fdd17ba
-rohTEST@planetscale.com Add license file. Minor refactor fa9de690ce0d27a781befbc1866aca5cd447798f cd447798f
-rohTEST@planetscale.com Added comments and refactored tests b6d39acb08939ba56e9e9587f34f3b8bcdcdc504 bcdcdc504
-rohTEST@planetscale.com Add logs for start and end of the copy phase 1cf72866ddfbd554700d6c9e32b9835ebb3b444c ebb3b444c
-rohTEST@planetscale.com Fix test 0992d39c6d473b548679d012cfa5a889ffa448ef 9ffa448ef
-rohTEST@planetscale.com Add test for vreplication log and fix string conversion bug b616143b14b75e7c23042c2eef4f6b27a275b0f7 7a275b0f7
-rohTEST@planetscale.com Ignore queries related to _vt.vreplication_log in tests e6926932c14da9a2213be246bc2de5f011668551 011668551
-rohTEST@planetscale.com Create log table. Util functions to insert logs. Insert logs in VReplicationExec and setMessage/State 37c09d3be83922a8ef936fbc028a5031f96b7dbf 1f96b7dbf
-harshTEST@planetscale.com Merge pull request #7951 from vmg/vmg/vr-client-perf 7794c62651066970e1176181cb7000d385d0b327 172fac7dec8b11937a4efb26ebf4bedf1771f189 f1771f189
-alkin.tezuysTEST@gmail.com java: Bump SNAPSHOT version to 11.0.0-SNAPSHOT after Vitess release v10 7794c62651066970e1176181cb7000d385d0b327 385d0b327
-alkin.tezuysTEST@gmail.com Merge pull request #7964 from planetscale/10_0_RC1_release_notes 31d84d6ce8e233a053794ad0ffe5168d34d04450 b020dc71f5c7dc663d814563f1b6c97340f4411f 340f4411f
-vTEST@strn.cat vstreamer: fix docs e7bf329da0029414c3b18e18e5cb2226b9a731a2 6b9a731a2
-amasTEST@slack-corp.com [workflow] extract migration targets from wrangler (#7934) 8bd5a7cb093369b50a0926bfa3a112b3b744e782 3b744e782
-alkin.tezuysTEST@gmail.com More spacing issues fixed 7509d47ba785e7a39b8726dc80f93955953ab98d 5953ab98d
-alkin.tezuysTEST@gmail.com Minor spacing fixes d31362e76ac69fb2bc4083e22e7c87683099fecd 83099fecd
-alkin.tezuysTEST@gmail.com Update 10_0_0_release_notes.md a7034bdf5d454a47738335ed2afc75f72bdbcf37 72bdbcf37
-alkin.tezuysTEST@gmail.com v10 GA Release Notes ad37320b2637620ee36d44d163399ecc2c1eea6c c2c1eea6c
-andrTEST@planetscale.com Merge pull request #7912 from planetscale/show-databases-like 7e13d4bccca0325ca07a488334e77c4f2f964f6b 95eceb17d10c62d56f2e94e5478afb5a1b63e1c2 a1b63e1c2
-andrTEST@planetscale.com Merge pull request #7629 from planetscale/gen4-table-aliases 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 7ad14e3f3d26cb1780cdbf9c22029740e5aebde4 0e5aebde4
-andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into show-databases-like 6b3ee1c31a939fc6628515f00087baa3e1e8acf7 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 860d3c66e
-2607934+shlomi-noaTEST@users.noreply.github.com Merge pull request #7959 from Hellcatlk/master 6c826115937d28ef83f05a1f0d54db0fcb814db4 cdab3040aaaa11c51e291d6b1a7af6fadd83dedf add83dedf
-zouy.fnTEST@cn.fujitsu.com Fix a gofmt warning 08038850a258d6de250cf9d864d6118616f5562c 616f5562c
-vTEST@strn.cat mysql: allow reusing row storage when reading from a stream a2850bbf41100618cb1192067b16585ba7c6b0c7 ba7c6b0c7
-vTEST@strn.cat throttle: do not check for time constantly e0b90daebe9e6b98d969934a24899b41d25e3a68 1d25e3a68
-andrTEST@planetscale.com fix compilation error 18036f5fb5f58523dbf50726beb741cedac2baf8 edac2baf8
-andrTEST@planetscale.com better code comment c173c945cf0e75e8649e6fa621509b5fb4ebd6c9 fb4ebd6c9
-vTEST@strn.cat conn: do not let header escape to the heap d31fb23d8cb9463810ed9fc132df4060a6812f6e 0a6812f6e
-vTEST@strn.cat vstreamer: do not allocate when filtering rows dafc1cb729d7be7dff2c05bd05a926005eb9a044 05eb9a044
-vTEST@strn.cat vstreamer: do not allocate when converting rows c5cd3067aeb9d952a2f45084c37634267e4f9062 67e4f9062
-andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into gen4-table-aliases 8c01827ed8b748240f213d9476ee162306ab01eb b1f9000ddd166d49adda6581e7ca9e0aca10c252 aca10c252
-aquarapTEST@gmail.com Fix mysql80 docker build with dep. a28591577b8d432b9c5d78abf59ad494a0a943b0 4a0a943b0
-TEST@planetscale.com Revert "docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24" 7858ff46545cff749b3663c92ae90ef27a5dfbc2 27a5dfbc2
-TEST@planetscale.com docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24 c91d46782933292941a846fef2590ff1a6fa193f a6fa193f`
-
- prs, authorCommits, nonMergeCommits, err := parseGitLog(in)
- require.NoError(t, err)
- assert.Equal(t, prs, []string{"7629", "7831", "7912", "7934", "7943", "7951", "7959", "7964", "7968", "7970"})
- assert.Equal(t, authorCommits, []string{"385d0b327", "3b744e782", "4a0a943b0", "538709da5", "616f5562c", "6b9a731a2", "e5242a88a", "edac2baf8"})
- assert.Equal(t, 28, nonMergeCommits)
-}
-
func TestLoadSummaryReadme(t *testing.T) {
readmeFile, err := os.CreateTemp("", "*.md")
require.NoError(t, err)
@@ -160,11 +111,12 @@ func TestGenerateReleaseNotes(t *testing.T) {
VersionUnderscore: "12_0_0",
ChangeLog: "* PR 1\n* PR 2\n",
ChangeMetrics: "optimization is the root of all evil",
+ SubDirPath: "changelog/12.0/12.0.0",
},
expectedOut: "# Release of Vitess v12.0.0\n" +
"This is the new release.\n\nNew features got added.\n" +
"------------\n" +
- "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_0_changelog.md).\n" +
+ "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.0/changelog.md).\n" +
"optimization is the root of all evil\n",
expectedOutChangeLog: "# Changelog of Vitess v12.0.0\n" +
"* PR 1\n" +
@@ -176,9 +128,10 @@ func TestGenerateReleaseNotes(t *testing.T) {
VersionUnderscore: "12_0_0",
ChangeLog: "* PR 1\n* PR 2\n",
ChangeMetrics: "optimization is the root of all evil",
+ SubDirPath: "changelog/12.0/12.0.0",
},
expectedOut: "# Release of Vitess v12.0.0\n" +
- "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_0_changelog.md).\n" +
+ "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.0/changelog.md).\n" +
"optimization is the root of all evil\n",
expectedOutChangeLog: "# Changelog of Vitess v12.0.0\n" +
"* PR 1\n" +
diff --git a/go/tools/releases/releases.go b/go/tools/releases/releases.go
new file mode 100644
index 00000000000..10c29233494
--- /dev/null
+++ b/go/tools/releases/releases.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+// The changelog directory is composed of a README that lists
+// and links to all major releases of Vitess. It has one
+// sub-directory for each major version. Each sub-directory is
+// composed of another README that also lists and links all the
+// patch releases of this major release. Those sub-directories
+// are composed of one directory per patch release. Finally,
+// the patch release directory contains the old files markdown:
+// summary, release_notes, changelog.
+//
+// This tool is solely responsible for generating the READMEs
+// and making sure they are up-to-date with the list of major
+// and patch releases we have.
+
+import (
+ "log"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "text/template"
+)
+
+const (
+ rootDir = "./changelog/"
+
+ rootFileTmpl = `## Releases
+
+{{- range $r := .SubDirs }}
+* [{{ $r.Name }}]({{ $r.Name }})
+{{- end -}}
+`
+
+ majorVersionTmpl = `## v{{ .Name }}
+
+{{- if .Team }}
+The dedicated team for this release can be found [here]({{.Team}}).{{ end }}
+
+{{- range $r := .SubDirs }}
+* **[{{ $r.Name }}]({{ $r.Name }})**
+{{ if $r.Changelog }} * [Changelog]({{ $r.Name }}/{{ $r.Changelog }})
+{{ end -}}
+{{ if $r.ReleaseNotes }} * [Release Notes]({{ $r.Name }}/{{ $r.ReleaseNotes }})
+{{ end -}}
+{{- end -}}
+`
+)
+
+// dir models one directory of the changelog tree together with the
+// markdown files found directly inside it.
+type dir struct {
+	Name         string // base name of the directory (e.g. "12.0" or "12.0.0")
+	Path         string // path to the directory, relative to the repository root
+	Changelog    string // filename of the changelog markdown, empty if absent
+	ReleaseNotes string // filename of the release notes markdown, empty if absent
+	Team         string // filename of the release-team markdown, empty if absent
+	SubDirs      []dir  // immediate sub-directories, populated by getDirs
+}
+
+// main regenerates the README files of the changelog directory tree:
+// one top-level README listing the major releases, plus one README per
+// major release listing its patch releases.
+func main() {
+	tree, err := getDirs(dir{Path: rootDir})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if err := execReadMeTemplateWithDir(tree, rootFileTmpl); err != nil {
+		log.Fatal(err)
+	}
+
+	for _, major := range tree.SubDirs {
+		if err := execReadMeTemplateWithDir(major, majorVersionTmpl); err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+// execReadMeTemplateWithDir renders tmpl with d as its data and writes
+// the result to README.md inside d's directory, truncating any previous
+// content.
+func execReadMeTemplateWithDir(d dir, tmpl string) error {
+	rootRM, err := os.OpenFile(path.Join(d.Path, "README.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640)
+	if err != nil {
+		return err
+	}
+	// Close the file once the template has been rendered; the original
+	// version leaked the descriptor.
+	defer rootRM.Close()
+
+	t := template.Must(template.New("root_readme").Parse(tmpl))
+	err = t.ExecuteTemplate(rootRM, "root_readme", d)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// getDirs recursively walks curDir, recording its sub-directories and the
+// well-known markdown files (changelog.md, release_notes.md, team.md) found
+// directly inside it. Sub-directories are sorted so the newest version
+// appears first.
+func getDirs(curDir dir) (dir, error) {
+	entries, err := os.ReadDir(curDir.Path)
+	if err != nil {
+		return dir{}, err
+	}
+
+	for _, entry := range entries {
+		if entry.IsDir() {
+			subDir, err := getDirs(dir{
+				Name: entry.Name(),
+				Path: path.Join(curDir.Path, entry.Name()),
+			})
+			if err != nil {
+				return dir{}, err
+			}
+			curDir.SubDirs = append(curDir.SubDirs, subDir)
+			continue
+		}
+
+		switch {
+		case strings.Contains(entry.Name(), "changelog.md"):
+			curDir.Changelog = entry.Name()
+		case strings.Contains(entry.Name(), "release_notes.md"):
+			curDir.ReleaseNotes = entry.Name()
+		case strings.Contains(entry.Name(), "team.md"):
+			curDir.Team = entry.Name()
+		}
+	}
+	// Sort version directories in descending order. Longer names denote
+	// higher versions ("10.0" > "9.0"), so compare lengths first, then the
+	// names lexicographically. The original comparator returned false for
+	// both orders when lengths differed, which is not a valid strict weak
+	// ordering and produced arbitrary results.
+	sort.Slice(curDir.SubDirs, func(i, j int) bool {
+		ni, nj := curDir.SubDirs[i].Name, curDir.SubDirs[j].Name
+		if len(ni) != len(nj) {
+			return len(ni) > len(nj)
+		}
+		return ni > nj
+	})
+	return curDir, nil
+}
diff --git a/go/vt/dbconfigs/credentials.go b/go/vt/dbconfigs/credentials.go
index 1f0a0bbb0e2..5a5dbc1c1a1 100644
--- a/go/vt/dbconfigs/credentials.go
+++ b/go/vt/dbconfigs/credentials.go
@@ -24,7 +24,6 @@ package dbconfigs
import (
"encoding/json"
"errors"
- "flag"
"os"
"os/signal"
"strings"
@@ -112,15 +111,15 @@ func init() {
fs.StringVar(&dbCredentialsFile, "db-credentials-file", dbCredentialsFile, "db credentials file; send SIGHUP to reload this file")
// 'vault' implementation flags
- flag.StringVar(&vaultAddr, "db-credentials-vault-addr", vaultAddr, "URL to Vault server")
- flag.DurationVar(&vaultTimeout, "db-credentials-vault-timeout", vaultTimeout, "Timeout for vault API operations")
- flag.StringVar(&vaultCACert, "db-credentials-vault-tls-ca", vaultCACert, "Path to CA PEM for validating Vault server certificate")
- flag.StringVar(&vaultPath, "db-credentials-vault-path", vaultPath, "Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds")
- flag.DurationVar(&vaultCacheTTL, "db-credentials-vault-ttl", vaultCacheTTL, "How long to cache DB credentials from the Vault server")
- flag.StringVar(&vaultTokenFile, "db-credentials-vault-tokenfile", vaultTokenFile, "Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable")
- flag.StringVar(&vaultRoleID, "db-credentials-vault-roleid", vaultRoleID, "Vault AppRole id; can also be passed using VAULT_ROLEID environment variable")
- flag.StringVar(&vaultRoleSecretIDFile, "db-credentials-vault-role-secretidfile", vaultRoleSecretIDFile, "Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable")
- flag.StringVar(&vaultRoleMountPoint, "db-credentials-vault-role-mountpoint", vaultRoleMountPoint, "Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable")
+ fs.StringVar(&vaultAddr, "db-credentials-vault-addr", vaultAddr, "URL to Vault server")
+ fs.DurationVar(&vaultTimeout, "db-credentials-vault-timeout", vaultTimeout, "Timeout for vault API operations")
+ fs.StringVar(&vaultCACert, "db-credentials-vault-tls-ca", vaultCACert, "Path to CA PEM for validating Vault server certificate")
+ fs.StringVar(&vaultPath, "db-credentials-vault-path", vaultPath, "Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds")
+ fs.DurationVar(&vaultCacheTTL, "db-credentials-vault-ttl", vaultCacheTTL, "How long to cache DB credentials from the Vault server")
+ fs.StringVar(&vaultTokenFile, "db-credentials-vault-tokenfile", vaultTokenFile, "Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable")
+ fs.StringVar(&vaultRoleID, "db-credentials-vault-roleid", vaultRoleID, "Vault AppRole id; can also be passed using VAULT_ROLEID environment variable")
+ fs.StringVar(&vaultRoleSecretIDFile, "db-credentials-vault-role-secretidfile", vaultRoleSecretIDFile, "Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable")
+ fs.StringVar(&vaultRoleMountPoint, "db-credentials-vault-role-mountpoint", vaultRoleMountPoint, "Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable")
})
}
}
diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go
index 9a03823e9ec..1a669f60faa 100644
--- a/go/vt/discovery/healthcheck.go
+++ b/go/vt/discovery/healthcheck.go
@@ -25,7 +25,7 @@ limitations under the License.
// Alternatively, use a Watcher implementation which will constantly watch
// a source (e.g. the topology) and add and remove tablets as they are
// added or removed from the source.
-// For a Watcher example have a look at NewCellTabletsWatcher().
+// For a Watcher example have a look at NewTopologyWatcher().
//
// Internally, the HealthCheck module is connected to each tablet and has a
// streaming RPC (StreamHealth) open to receive periodic health infos.
@@ -35,6 +35,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"hash/crc32"
"html/template"
@@ -45,7 +46,9 @@ import (
"time"
"github.com/spf13/pflag"
+ "golang.org/x/sync/semaphore"
+ "vitess.io/vitess/go/flagutil"
"vitess.io/vitess/go/netutil"
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/log"
@@ -79,6 +82,9 @@ var (
// tabletFilters are the keyspace|shard or keyrange filters to apply to the full set of tablets.
tabletFilters []string
+ // tabletFilterTags are the tablet tag filters (as key:value pairs) to apply to the full set of tablets.
+ tabletFilterTags flagutil.StringMapValue
+
// refreshInterval is the interval at which healthcheck refreshes its list of tablets from topo.
refreshInterval = 1 * time.Minute
@@ -86,10 +92,16 @@ var (
refreshKnownTablets = true
// topoReadConcurrency tells us how many topo reads are allowed in parallel.
- topoReadConcurrency = 32
+ topoReadConcurrency int64 = 32
+
+ // healthCheckDialConcurrency tells us how many healthcheck connections can be opened to tablets at once. This should be less than the golang max thread limit of 10000.
+ healthCheckDialConcurrency int64 = 1024
// How much to sleep between each check.
waitAvailableTabletInterval = 100 * time.Millisecond
+
+ // errKeyspacesToWatchAndTabletFilters is an error for cases where incompatible filters are defined.
+ errKeyspacesToWatchAndTabletFilters = errors.New("only one of --keyspaces_to_watch and --tablet_filters may be specified at a time")
)
// See the documentation for NewHealthCheck below for an explanation of these parameters.
@@ -147,19 +159,28 @@ func ParseTabletURLTemplateFromFlag() {
}
func init() {
- servenv.OnParseFor("vtgate", registerDiscoveryFlags)
- servenv.OnParseFor("vtcombo", registerDiscoveryFlags)
- servenv.OnParseFor("vtctld", registerDiscoveryFlags)
+ for _, cmd := range []string{"vtgate", "vtcombo"} {
+ servenv.OnParseFor(cmd, registerDiscoveryFlags)
+ servenv.OnParseFor(cmd, registerWebUIFlags)
+ }
+
+ servenv.OnParseFor("vtctld", registerWebUIFlags)
+ servenv.OnParseFor("vttablet", registerWebUIFlags)
}
func registerDiscoveryFlags(fs *pflag.FlagSet) {
+ fs.StringSliceVar(&tabletFilters, "tablet_filters", []string{}, "Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.")
+ fs.Var(&tabletFilterTags, "tablet-filter-tags", "Specifies a comma-separated list of tablet tags (as key:value pairs) to filter the tablets to watch.")
+ fs.Var((*topoproto.TabletTypeListFlag)(&AllowedTabletTypes), "allowed_tablet_types", "Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.")
+ fs.StringSliceVar(&KeyspacesToWatch, "keyspaces_to_watch", []string{}, "Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.")
+}
+
+func registerWebUIFlags(fs *pflag.FlagSet) {
fs.StringVar(&TabletURLTemplateString, "tablet_url_template", "http://{{.GetTabletHostPort}}", "Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this.")
fs.DurationVar(&refreshInterval, "tablet_refresh_interval", 1*time.Minute, "Tablet refresh interval.")
fs.BoolVar(&refreshKnownTablets, "tablet_refresh_known_tablets", true, "Whether to reload the tablet's address/port map from topo in case they change.")
- fs.IntVar(&topoReadConcurrency, "topo_read_concurrency", 32, "Concurrency of topo reads.")
- fs.StringSliceVar(&tabletFilters, "tablet_filters", []string{}, "Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.")
- fs.Var((*topoproto.TabletTypeListFlag)(&AllowedTabletTypes), "allowed_tablet_types", "Specifies the tablet types this vtgate is allowed to route queries to.")
- fs.StringSliceVar(&KeyspacesToWatch, "keyspaces_to_watch", []string{}, "Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.")
+ fs.Int64Var(&topoReadConcurrency, "topo_read_concurrency", 32, "Concurrency of topo reads.")
+ fs.Int64Var(&healthCheckDialConcurrency, "healthcheck-dial-concurrency", 1024, "Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000.")
ParseTabletURLTemplateFromFlag()
}
@@ -276,6 +297,29 @@ type HealthCheckImpl struct {
subMu sync.Mutex
// subscribers
subscribers map[chan *TabletHealth]struct{}
+ // healthCheckDialSem is used to limit how many healthcheck connections can be opened to tablets at once.
+ healthCheckDialSem *semaphore.Weighted
+}
+
+// NewVTGateHealthCheckFilters returns healthcheck filters for vtgate.
+func NewVTGateHealthCheckFilters() (filters TabletFilters, err error) {
+ if len(tabletFilters) > 0 {
+ if len(KeyspacesToWatch) > 0 {
+ return nil, errKeyspacesToWatchAndTabletFilters
+ }
+
+ fbs, err := NewFilterByShard(tabletFilters)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse tablet_filters value %q: %v", strings.Join(tabletFilters, ","), err)
+ }
+ filters = append(filters, fbs)
+ } else if len(KeyspacesToWatch) > 0 {
+ filters = append(filters, NewFilterByKeyspace(KeyspacesToWatch))
+ }
+ if len(tabletFilterTags) > 0 {
+ filters = append(filters, NewFilterByTabletTags(tabletFilterTags))
+ }
+ return filters, nil
}
// NewHealthCheck creates a new HealthCheck object.
@@ -299,10 +343,14 @@ type HealthCheckImpl struct {
//
// The localCell for this healthcheck
//
-// callback.
+// cellsToWatch.
+//
+// Is a list of cells to watch for tablets.
+//
+// filters.
//
-// A function to call when there is a primary change. Used to notify vtgate's buffer to stop buffering.
-func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Duration, topoServer *topo.Server, localCell, cellsToWatch string) *HealthCheckImpl {
+// Is one or more filters to apply when determining what tablets we want to stream healthchecks from.
+func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Duration, topoServer *topo.Server, localCell, cellsToWatch string, filters TabletFilter) *HealthCheckImpl {
log.Infof("loading tablets for cells: %v", cellsToWatch)
hc := &HealthCheckImpl{
@@ -310,6 +358,7 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur
cell: localCell,
retryDelay: retryDelay,
healthCheckTimeout: healthCheckTimeout,
+ healthCheckDialSem: semaphore.NewWeighted(healthCheckDialConcurrency),
healthByAlias: make(map[tabletAliasString]*tabletHealthCheck),
healthData: make(map[KeyspaceShardTabletType]map[tabletAliasString]*TabletHealth),
healthy: make(map[KeyspaceShardTabletType][]*TabletHealth),
@@ -317,7 +366,6 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur
cellAliases: make(map[string]string),
}
var topoWatchers []*TopologyWatcher
- var filter TabletFilter
cells := strings.Split(cellsToWatch, ",")
if cellsToWatch == "" {
cells = append(cells, localCell)
@@ -328,20 +376,8 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur
if c == "" {
continue
}
- if len(tabletFilters) > 0 {
- if len(KeyspacesToWatch) > 0 {
- log.Exitf("Only one of -keyspaces_to_watch and -tablet_filters may be specified at a time")
- }
- fbs, err := NewFilterByShard(tabletFilters)
- if err != nil {
- log.Exitf("Cannot parse tablet_filters parameter: %v", err)
- }
- filter = fbs
- } else if len(KeyspacesToWatch) > 0 {
- filter = NewFilterByKeyspace(KeyspacesToWatch)
- }
- topoWatchers = append(topoWatchers, NewCellTabletsWatcher(ctx, topoServer, hc, filter, c, refreshInterval, refreshKnownTablets, topoReadConcurrency))
+ topoWatchers = append(topoWatchers, NewTopologyWatcher(ctx, topoServer, hc, filters, c, refreshInterval, refreshKnownTablets, topoReadConcurrency))
}
hc.topoWatchers = topoWatchers
@@ -423,8 +459,29 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
hc.mu.Lock()
defer hc.mu.Unlock()
- key := KeyFromTablet(tablet)
tabletAlias := tabletAliasString(topoproto.TabletAliasString(tablet.Alias))
+ defer func() {
+ // We want to be sure the tablet is gone from the secondary
+ // maps even if it's already gone from the authoritative map.
+ // The tablet's type also may have recently changed as well,
+ // so ensure that the tablet we're removing is removed from
+ // any possible secondary map keys:
+ // key: keyspace.shard.tabletType -> val: map[tabletAlias]tabletHealth
+ for _, tabletType := range topoproto.AllTabletTypes {
+ key := KeyspaceShardTabletType(fmt.Sprintf("%s.%s.%s", tablet.Keyspace, tablet.Shard, topoproto.TabletTypeLString(tabletType)))
+ // delete from map by keyspace.shard.tabletType
+ ths, ok := hc.healthData[key]
+ if !ok {
+ continue
+ }
+ delete(ths, tabletAlias)
+ // delete from healthy list
+ healthy, ok := hc.healthy[key]
+ if ok && len(healthy) > 0 {
+ hc.recomputeHealthy(key)
+ }
+ }
+ }()
// delete from authoritative map
th, ok := hc.healthByAlias[tabletAlias]
if !ok {
@@ -435,18 +492,6 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
// which will call finalizeConn, which will close the connection.
th.cancelFunc()
delete(hc.healthByAlias, tabletAlias)
- // delete from map by keyspace.shard.tabletType
- ths, ok := hc.healthData[key]
- if !ok {
- log.Warningf("We have no health data for target: %v", key)
- return
- }
- delete(ths, tabletAlias)
- // delete from healthy list
- healthy, ok := hc.healthy[key]
- if ok && len(healthy) > 0 {
- hc.recomputeHealthy(key)
- }
}
func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Target, trivialUpdate bool, up bool) {
@@ -685,30 +730,8 @@ func (hc *HealthCheckImpl) WaitForAllServingTablets(ctx context.Context, targets
return hc.waitForTablets(ctx, targets, true)
}
-// FilterTargetsByKeyspaces only returns the targets that are part of the provided keyspaces
-func FilterTargetsByKeyspaces(keyspaces []string, targets []*query.Target) []*query.Target {
- filteredTargets := make([]*query.Target, 0)
-
- // Keep them all if there are no keyspaces to watch
- if len(KeyspacesToWatch) == 0 {
- return append(filteredTargets, targets...)
- }
-
- // Let's remove from the target shards that are not in the keyspaceToWatch list.
- for _, target := range targets {
- for _, keyspaceToWatch := range keyspaces {
- if target.Keyspace == keyspaceToWatch {
- filteredTargets = append(filteredTargets, target)
- }
- }
- }
- return filteredTargets
-}
-
// waitForTablets is the internal method that polls for tablets.
func (hc *HealthCheckImpl) waitForTablets(ctx context.Context, targets []*query.Target, requireServing bool) error {
- targets = FilterTargetsByKeyspaces(KeyspacesToWatch, targets)
-
for {
// We nil targets as we find them.
allPresent := true
@@ -785,7 +808,7 @@ func (hc *HealthCheckImpl) TabletConnection(alias *topodata.TabletAlias, target
// TODO: test that throws this error
return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "tablet: %v is either down or nonexistent", alias)
}
- return thc.Connection(), nil
+ return thc.Connection(hc), nil
}
// getAliasByCell should only be called while holding hc.mu
diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go
index 5ba47f69025..28a0dcf91fe 100644
--- a/go/vt/discovery/healthcheck_test.go
+++ b/go/vt/discovery/healthcheck_test.go
@@ -64,6 +64,77 @@ func init() {
refreshInterval = time.Minute
}
+func TestNewVTGateHealthCheckFilters(t *testing.T) {
+ defer func() {
+ KeyspacesToWatch = nil
+ tabletFilters = nil
+ tabletFilterTags = nil
+ }()
+
+ testCases := []struct {
+ name string
+ keyspacesToWatch []string
+ tabletFilters []string
+ tabletFilterTags map[string]string
+ expectedError string
+ expectedFilterTypes []any
+ }{
+ {
+ name: "noFilters",
+ },
+ {
+ name: "tabletFilters",
+ tabletFilters: []string{"ks1|-80"},
+ expectedFilterTypes: []any{&FilterByShard{}},
+ },
+ {
+ name: "keyspacesToWatch",
+ keyspacesToWatch: []string{"ks1"},
+ expectedFilterTypes: []any{&FilterByKeyspace{}},
+ },
+ {
+ name: "tabletFiltersAndTags",
+ tabletFilters: []string{"ks1|-80"},
+ tabletFilterTags: map[string]string{"test": "true"},
+ expectedFilterTypes: []any{&FilterByShard{}, &FilterByTabletTags{}},
+ },
+ {
+ name: "keyspacesToWatchAndTags",
+ tabletFilterTags: map[string]string{"test": "true"},
+ keyspacesToWatch: []string{"ks1"},
+ expectedFilterTypes: []any{&FilterByKeyspace{}, &FilterByTabletTags{}},
+ },
+ {
+ name: "failKeyspacesToWatchAndFilters",
+ tabletFilters: []string{"ks1|-80"},
+ keyspacesToWatch: []string{"ks1"},
+ expectedError: errKeyspacesToWatchAndTabletFilters.Error(),
+ },
+ {
+ name: "failInvalidTabletFilters",
+ tabletFilters: []string{"shouldfail!@#!"},
+ expectedError: "failed to parse tablet_filters value \"shouldfail!@#!\": invalid FilterByShard parameter: shouldfail!@#!",
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ KeyspacesToWatch = testCase.keyspacesToWatch
+ tabletFilters = testCase.tabletFilters
+ tabletFilterTags = testCase.tabletFilterTags
+
+ filters, err := NewVTGateHealthCheckFilters()
+ if testCase.expectedError != "" {
+ assert.EqualError(t, err, testCase.expectedError)
+ }
+ assert.Len(t, filters, len(testCase.expectedFilterTypes))
+ for i, filter := range filters {
+ assert.IsType(t, testCase.expectedFilterTypes[i], filter)
+ }
+ })
+ }
+}
+
func TestHealthCheck(t *testing.T) {
// reset error counters
hcErrorCounters.ResetAll()
@@ -645,27 +716,6 @@ func TestWaitForAllServingTablets(t *testing.T) {
err = hc.WaitForAllServingTablets(ctx, targets)
assert.NotNil(t, err, "error should not be nil (there are no tablets on this keyspace")
-
- targets = []*querypb.Target{
-
- {
- Keyspace: tablet.Keyspace,
- Shard: tablet.Shard,
- TabletType: tablet.Type,
- },
- {
- Keyspace: "newkeyspace",
- Shard: tablet.Shard,
- TabletType: tablet.Type,
- },
- }
-
- KeyspacesToWatch = []string{tablet.Keyspace}
-
- err = hc.WaitForAllServingTablets(ctx, targets)
- assert.Nil(t, err, "error should be nil. Keyspace with no tablets is filtered")
-
- KeyspacesToWatch = []string{}
}
// TestRemoveTablet tests the behavior when a tablet goes away.
@@ -684,7 +734,7 @@ func TestRemoveTablet(t *testing.T) {
// there will be a first result, get and discard it
<-resultChan
- shr := &querypb.StreamHealthResponse{
+ shrReplica := &querypb.StreamHealthResponse{
TabletAlias: tablet.Alias,
Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
Serving: true,
@@ -698,7 +748,7 @@ func TestRemoveTablet(t *testing.T) {
Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2},
PrimaryTermStartTime: 0,
}}
- input <- shr
+ input <- shrReplica
<-resultChan
// check it's there
a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
@@ -708,6 +758,71 @@ func TestRemoveTablet(t *testing.T) {
hc.RemoveTablet(tablet)
a = hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
assert.Empty(t, a, "wrong result, expected empty list")
+
+ // Now confirm that when a tablet's type changes between when it's added to the
+ // cache and when it's removed, that the tablet is entirely removed from the
+ // cache since in the secondary maps it's keyed in part by tablet type.
+ // Note: we are using GetTabletStats here to check the healthData map (rather
+ // than the healthy map that we checked above) because that is the data
+ // structure that is used when printing the contents of the healthcheck cache
+ // in the /debug/status endpoint and in the SHOW VITESS_TABLETS; SQL command
+ // output.
+
+ // Add the tablet back.
+ hc.AddTablet(tablet)
+ // Receive and discard the initial result as we have not yet sent the first
+ // StreamHealthResponse with the dynamic serving and stats information.
+ <-resultChan
+ // Send the first StreamHealthResponse with the dynamic serving and stats
+ // information.
+ input <- shrReplica
+ <-resultChan
+ // Confirm it's there in the cache.
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
+ mustMatch(t, want, a, "unexpected result")
+
+ // Change the tablet type to RDONLY.
+ tablet.Type = topodatapb.TabletType_RDONLY
+ shrRdonly := &querypb.StreamHealthResponse{
+ TabletAlias: tablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY},
+ Serving: true,
+ TabletExternallyReparentedTimestamp: 0,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4},
+ }
+
+ // Now Replace it, which does a Remove and Add. The tablet should be removed
+ // from the cache and all its maps even though the tablet type had changed
+ // in-between the initial Add and Remove.
+ hc.ReplaceTablet(tablet, tablet)
+ // Receive and discard the initial result as we have not yet sent the first
+ // StreamHealthResponse with the dynamic serving and stats information.
+ <-resultChan
+ // Confirm that the old entry is gone.
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
+ assert.Empty(t, a, "wrong result, expected empty list")
+ // Send the first StreamHealthResponse with the dynamic serving and stats
+ // information.
+ input <- shrRdonly
+ <-resultChan
+ // Confirm that the new entry is there in the cache.
+ want = []*TabletHealth{{
+ Tablet: tablet,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY},
+ Serving: true,
+ Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4},
+ PrimaryTermStartTime: 0,
+ }}
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY})
+ mustMatch(t, want, a, "unexpected result")
+
+ // Delete the tablet, confirm again that it's gone in both tablet type
+ // forms.
+ hc.RemoveTablet(tablet)
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA})
+ assert.Empty(t, a, "wrong result, expected empty list")
+ a = hc.GetTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY})
+ assert.Empty(t, a, "wrong result, expected empty list")
}
// TestGetHealthyTablets tests the functionality of GetHealthyTabletStats.
@@ -899,7 +1014,7 @@ func TestGetHealthyTablets(t *testing.T) {
func TestPrimaryInOtherCell(t *testing.T) {
ts := memorytopo.NewServer("cell1", "cell2")
- hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2")
+ hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2", nil)
defer hc.Close()
// add a tablet as primary in different cell
@@ -956,7 +1071,7 @@ func TestPrimaryInOtherCell(t *testing.T) {
func TestReplicaInOtherCell(t *testing.T) {
ts := memorytopo.NewServer("cell1", "cell2")
- hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2")
+ hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2", nil)
defer hc.Close()
// add a tablet as replica
@@ -1058,7 +1173,7 @@ func TestReplicaInOtherCell(t *testing.T) {
func TestCellAliases(t *testing.T) {
ts := memorytopo.NewServer("cell1", "cell2")
- hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2")
+ hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2", nil)
defer hc.Close()
cellsAlias := &topodatapb.CellsAlias{
@@ -1204,7 +1319,7 @@ func tabletDialer(tablet *topodatapb.Tablet, _ grpcclient.FailFast) (queryservic
}
func createTestHc(ts *topo.Server) *HealthCheckImpl {
- return NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell", "")
+ return NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell", "", nil)
}
type fakeConn struct {
diff --git a/go/vt/discovery/keyspace_events.go b/go/vt/discovery/keyspace_events.go
index 0b3fa7e9efe..9f3381958a9 100644
--- a/go/vt/discovery/keyspace_events.go
+++ b/go/vt/discovery/keyspace_events.go
@@ -23,12 +23,14 @@ import (
"google.golang.org/protobuf/proto"
+ "vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
// KeyspaceEventWatcher is an auxiliary watcher that watches all availability incidents
@@ -65,7 +67,7 @@ type KeyspaceEvent struct {
type ShardEvent struct {
Tablet *topodatapb.TabletAlias
- Target *query.Target
+ Target *querypb.Target
Serving bool
}
@@ -124,17 +126,27 @@ func (kss *keyspaceState) beingResharded(currentShard string) bool {
kss.mu.Lock()
defer kss.mu.Unlock()
- // if the keyspace is gone, or if it has no known availability events, the keyspace
- // cannot be in the middle of a resharding operation
+ // If the keyspace is gone, has no known availability events, or is in the middle of a
+ // MoveTables, then the keyspace cannot be in the middle of a resharding operation.
if kss.deleted || kss.consistent {
return false
}
- // for all the known shards, try to find a primary shard besides the one we're trying to access
- // and which is currently healthy. if there are other healthy primaries in the keyspace, it means
- // we're in the middle of a resharding operation
+ // If there are unequal and overlapping shards in the keyspace and any of them are
+ // currently serving, then we assume that we are in the middle of a Reshard.
+ _, ckr, err := topo.ValidateShardName(currentShard)
+ if err != nil || ckr == nil { // Assume not and avoid potential panic
+ return false
+ }
for shard, sstate := range kss.shards {
- if shard != currentShard && sstate.serving {
+ if !sstate.serving || shard == currentShard {
+ continue
+ }
+ _, skr, err := topo.ValidateShardName(shard)
+ if err != nil || skr == nil { // Assume not and avoid potential panic
+ return false
+ }
+ if key.KeyRangesIntersect(ckr, skr) {
return true
}
}
@@ -143,7 +155,7 @@ func (kss *keyspaceState) beingResharded(currentShard string) bool {
}
type shardState struct {
- target *query.Target
+ target *querypb.Target
serving bool
externallyReparented int64
currentPrimary *topodatapb.TabletAlias
@@ -269,10 +281,12 @@ func (kss *keyspaceState) ensureConsistentLocked() {
Serving: sstate.serving,
})
- log.Infof("keyspace event resolved: %s/%s is now consistent (serving: %v)",
- sstate.target.Keyspace, sstate.target.Keyspace,
- sstate.serving,
- )
+ // This log line is disabled because it caused a log storm in production; see
+ // thread https://slack-pde.slack.com/archives/C06CPL4HMED/p1729896804879749
+ // log.Infof("keyspace event resolved: %s/%s is now consistent (serving: %v)",
+ // sstate.target.Keyspace, sstate.target.Shard,
+ // sstate.serving,
+ // )
if !sstate.serving {
delete(kss.shards, shard)
@@ -426,7 +440,7 @@ func (kew *KeyspaceEventWatcher) getKeyspaceStatus(keyspace string) *keyspaceSta
// This is not a fully accurate heuristic, but it's good enough that we'd want to buffer the
// request for the given target under the assumption that the reason why it cannot be completed
// right now is transitory.
-func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *query.Target) bool {
+func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *querypb.Target) bool {
if target.TabletType != topodatapb.TabletType_PRIMARY {
return false
}
@@ -446,7 +460,7 @@ func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *query.Target) bo
// The shard state keeps track of the current primary and the last externally reparented time, which we can use
// to determine that there was a serving primary which now became non serving. This is only possible in a DemotePrimary
// RPC which are only called from ERS and PRS. So buffering will stop when these operations succeed.
-func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(target *query.Target) bool {
+func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(target *querypb.Target) bool {
if target.TabletType != topodatapb.TabletType_PRIMARY {
return false
}
diff --git a/go/vt/discovery/keyspace_events_test.go b/go/vt/discovery/keyspace_events_test.go
new file mode 100644
index 00000000000..bd1b6def62f
--- /dev/null
+++ b/go/vt/discovery/keyspace_events_test.go
@@ -0,0 +1,317 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package discovery
+
+import (
+ "context"
+ "encoding/hex"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/faketopo"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+)
+
+func TestSrvKeyspaceWithNilNewKeyspace(t *testing.T) {
+ cell := "cell"
+ keyspace := "testks"
+ factory := faketopo.NewFakeTopoFactory()
+ factory.AddCell(cell)
+ ts := faketopo.NewFakeTopoServer(factory)
+ ts2 := &fakeTopoServer{}
+ hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, cell, "", nil)
+ defer hc.Close()
+ kew := NewKeyspaceEventWatcher(context.Background(), ts2, hc, cell)
+ kss := &keyspaceState{
+ kew: kew,
+ keyspace: keyspace,
+ shards: make(map[string]*shardState),
+ }
+ kss.lastKeyspace = &topodatapb.SrvKeyspace{
+ ServedFrom: []*topodatapb.SrvKeyspace_ServedFrom{
+ {
+ TabletType: topodatapb.TabletType_PRIMARY,
+ Keyspace: keyspace,
+ },
+ },
+ }
+ require.True(t, kss.onSrvKeyspace(nil, nil))
+}
+
+// TestKeyspaceEventTypes confirms that the keyspace event watcher determines
+// that the unavailability event is caused by the correct scenario. We should
+// consider it to be caused by a resharding operation when the following
+// conditions are present:
+// 1. The keyspace is inconsistent (in the middle of an availability event)
+// 2. The target tablet is a primary
+// 3. The keyspace has overlapping shards
+// 4. The overlapping shard's tablet is serving
+// And we should consider the cause to be a primary not serving when the
+// following conditions exist:
+// 1. The keyspace is inconsistent (in the middle of an availability event)
+// 2. The target tablet is a primary
+// 3. The target tablet is not serving
+// 4. The shard's externallyReparented time is not 0
+// 5. The shard's currentPrimary state is not nil
+// We should never consider both as a possible cause given the same
+// keyspace state.
+func TestKeyspaceEventTypes(t *testing.T) {
+ cell := "cell"
+ keyspace := "testks"
+ factory := faketopo.NewFakeTopoFactory()
+ factory.AddCell(cell)
+ ts := faketopo.NewFakeTopoServer(factory)
+ ts2 := &fakeTopoServer{}
+ hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, cell, "", nil)
+ defer hc.Close()
+ kew := NewKeyspaceEventWatcher(context.Background(), ts2, hc, cell)
+
+ type testCase struct {
+ name string
+ kss *keyspaceState
+ shardToCheck string
+ expectResharding bool
+ expectPrimaryNotServing bool
+ }
+
+ testCases := []testCase{
+ {
+ name: "one to two resharding in progress",
+ kss: &keyspaceState{
+ kew: kew,
+ keyspace: keyspace,
+ shards: map[string]*shardState{
+ "-": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "-",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ },
+ "-80": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "-80",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: true,
+ },
+ "80-": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "80-",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ },
+ },
+ consistent: false,
+ },
+ shardToCheck: "-",
+ expectResharding: true,
+ expectPrimaryNotServing: false,
+ },
+ {
+ name: "two to four resharding in progress",
+ kss: &keyspaceState{
+ kew: kew,
+ keyspace: keyspace,
+ shards: map[string]*shardState{
+ "-80": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "-80",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ },
+ "80-": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "80-",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: true,
+ },
+ "-40": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "-40",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: true,
+ },
+ "40-80": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "40-80",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: true,
+ },
+ "80-c0": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "80-c0",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ },
+ "c0-": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "c0-",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ },
+ },
+ consistent: false,
+ },
+ shardToCheck: "-80",
+ expectResharding: true,
+ expectPrimaryNotServing: false,
+ },
+ {
+ name: "unsharded primary not serving",
+ kss: &keyspaceState{
+ kew: kew,
+ keyspace: keyspace,
+ shards: map[string]*shardState{
+ "-": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "-",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ externallyReparented: time.Now().UnixNano(),
+ currentPrimary: &topodatapb.TabletAlias{
+ Cell: cell,
+ Uid: 100,
+ },
+ },
+ },
+ consistent: false,
+ },
+ shardToCheck: "-",
+ expectResharding: false,
+ expectPrimaryNotServing: true,
+ },
+ {
+ name: "sharded primary not serving",
+ kss: &keyspaceState{
+ kew: kew,
+ keyspace: keyspace,
+ shards: map[string]*shardState{
+ "-80": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "-80",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: false,
+ externallyReparented: time.Now().UnixNano(),
+ currentPrimary: &topodatapb.TabletAlias{
+ Cell: cell,
+ Uid: 100,
+ },
+ },
+ "80-": {
+ target: &querypb.Target{
+ Keyspace: keyspace,
+ Shard: "80-",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ serving: true,
+ },
+ },
+ consistent: false,
+ },
+ shardToCheck: "-80",
+ expectResharding: false,
+ expectPrimaryNotServing: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ kew.mu.Lock()
+ kew.keyspaces[keyspace] = tc.kss
+ kew.mu.Unlock()
+
+ require.NotNil(t, tc.kss.shards[tc.shardToCheck], "the specified shardToCheck of %q does not exist in the shardState", tc.shardToCheck)
+
+ resharding := kew.TargetIsBeingResharded(tc.kss.shards[tc.shardToCheck].target)
+ require.Equal(t, resharding, tc.expectResharding, "TargetIsBeingResharded should return %t", tc.expectResharding)
+
+ primaryDown := kew.PrimaryIsNotServing(tc.kss.shards[tc.shardToCheck].target)
+ require.Equal(t, primaryDown, tc.expectPrimaryNotServing, "PrimaryIsNotServing should return %t", tc.expectPrimaryNotServing)
+ })
+ }
+}
+
+type fakeTopoServer struct {
+}
+
+// GetTopoServer returns the full topo.Server instance.
+func (f *fakeTopoServer) GetTopoServer() (*topo.Server, error) {
+ return nil, nil
+}
+
+// GetSrvKeyspaceNames returns the list of keyspaces served in
+// the provided cell.
+func (f *fakeTopoServer) GetSrvKeyspaceNames(ctx context.Context, cell string, staleOK bool) ([]string, error) {
+ return []string{"ks1"}, nil
+}
+
+// GetSrvKeyspace returns the SrvKeyspace for a cell/keyspace.
+func (f *fakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topodatapb.SrvKeyspace, error) {
+ zeroHexBytes, _ := hex.DecodeString("")
+ eightyHexBytes, _ := hex.DecodeString("80")
+ ks := &topodatapb.SrvKeyspace{
+ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
+ {
+ ServedType: topodatapb.TabletType_PRIMARY,
+ ShardReferences: []*topodatapb.ShardReference{
+ {Name: "-80", KeyRange: &topodatapb.KeyRange{Start: zeroHexBytes, End: eightyHexBytes}},
+ {Name: "80-", KeyRange: &topodatapb.KeyRange{Start: eightyHexBytes, End: zeroHexBytes}},
+ },
+ },
+ },
+ }
+ return ks, nil
+}
+
+func (f *fakeTopoServer) WatchSrvKeyspace(ctx context.Context, cell, keyspace string, callback func(*topodatapb.SrvKeyspace, error) bool) {
+ ks, err := f.GetSrvKeyspace(ctx, cell, keyspace)
+ callback(ks, err)
+}
+
+// WatchSrvVSchema starts watching the SrvVSchema object for
+// the provided cell. It will call the callback when
+// a new value or an error occurs.
+func (f *fakeTopoServer) WatchSrvVSchema(ctx context.Context, cell string, callback func(*vschemapb.SrvVSchema, error) bool) {
+
+}
diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go
index 71ab78fd15b..c084b580e40 100644
--- a/go/vt/discovery/replicationlag.go
+++ b/go/vt/discovery/replicationlag.go
@@ -75,13 +75,13 @@ func SetMinNumTablets(numTablets int) {
minNumTablets = numTablets
}
-// IsReplicationLagHigh verifies that the given LegacytabletHealth refers to a tablet with high
+// IsReplicationLagHigh verifies that the given TabletHealth refers to a tablet with high
// replication lag, i.e. higher than the configured discovery_low_replication_lag flag.
func IsReplicationLagHigh(tabletHealth *TabletHealth) bool {
return float64(tabletHealth.Stats.ReplicationLagSeconds) > lowReplicationLag.Seconds()
}
-// IsReplicationLagVeryHigh verifies that the given LegacytabletHealth refers to a tablet with very high
+// IsReplicationLagVeryHigh verifies that the given TabletHealth refers to a tablet with very high
// replication lag, i.e. higher than the configured discovery_high_replication_lag_minimum_serving flag.
func IsReplicationLagVeryHigh(tabletHealth *TabletHealth) bool {
return float64(tabletHealth.Stats.ReplicationLagSeconds) > highReplicationLagMinServing.Seconds()
@@ -117,7 +117,7 @@ func FilterStatsByReplicationLag(tabletHealthList []*TabletHealth) []*TabletHeal
return filterStatsByLag(tabletHealthList)
}
res := filterStatsByLagWithLegacyAlgorithm(tabletHealthList)
- // run the filter again if exactly one tablet is removed,
+ // Run the filter again if exactly one tablet is removed,
// and we have spare tablets.
if len(res) > minNumTablets && len(res) == len(tabletHealthList)-1 {
res = filterStatsByLagWithLegacyAlgorithm(res)
@@ -128,12 +128,12 @@ func FilterStatsByReplicationLag(tabletHealthList []*TabletHealth) []*TabletHeal
func filterStatsByLag(tabletHealthList []*TabletHealth) []*TabletHealth {
list := make([]tabletLagSnapshot, 0, len(tabletHealthList))
- // filter non-serving tablets and those with very high replication lag
+ // Filter out non-serving tablets and those with very high replication lag.
for _, ts := range tabletHealthList {
if !ts.Serving || ts.LastError != nil || ts.Stats == nil || IsReplicationLagVeryHigh(ts) {
continue
}
- // Pull the current replication lag for a stable sort later.
+ // Save the current replication lag for a stable sort later.
list = append(list, tabletLagSnapshot{
ts: ts,
replag: ts.Stats.ReplicationLagSeconds})
@@ -142,7 +142,7 @@ func filterStatsByLag(tabletHealthList []*TabletHealth) []*TabletHealth {
// Sort by replication lag.
sort.Sort(tabletLagSnapshotList(list))
- // Pick those with low replication lag, but at least minNumTablets tablets regardless.
+ // Pick tablets with low replication lag, but at least minNumTablets tablets regardless.
res := make([]*TabletHealth, 0, len(list))
for i := 0; i < len(list); i++ {
if !IsReplicationLagHigh(list[i].ts) || i < minNumTablets {
@@ -154,7 +154,7 @@ func filterStatsByLag(tabletHealthList []*TabletHealth) []*TabletHealth {
func filterStatsByLagWithLegacyAlgorithm(tabletHealthList []*TabletHealth) []*TabletHealth {
list := make([]*TabletHealth, 0, len(tabletHealthList))
- // filter non-serving tablets
+ // Filter out non-serving tablets.
for _, ts := range tabletHealthList {
if !ts.Serving || ts.LastError != nil || ts.Stats == nil {
continue
@@ -164,7 +164,7 @@ func filterStatsByLagWithLegacyAlgorithm(tabletHealthList []*TabletHealth) []*Ta
if len(list) <= 1 {
return list
}
- // if all have low replication lag (<=30s), return all tablets.
+ // If all tablets have low replication lag (<=30s), return all of them.
allLowLag := true
for _, ts := range list {
if IsReplicationLagHigh(ts) {
@@ -175,12 +175,12 @@ func filterStatsByLagWithLegacyAlgorithm(tabletHealthList []*TabletHealth) []*Ta
if allLowLag {
return list
}
- // filter those affecting "mean" lag significantly
- // calculate mean for all tablets
+ // We want to filter out tablets that are affecting "mean" lag significantly.
+ // We first calculate the mean across all tablets.
res := make([]*TabletHealth, 0, len(list))
m, _ := mean(list, -1)
for i, ts := range list {
- // calculate mean by excluding ith tablet
+ // Now we calculate the mean by excluding the ith tablet.
mi, _ := mean(list, i)
if float64(mi) > float64(m)*0.7 {
res = append(res, ts)
@@ -189,9 +189,11 @@ func filterStatsByLagWithLegacyAlgorithm(tabletHealthList []*TabletHealth) []*Ta
if len(res) >= minNumTablets {
return res
}
- // return at least minNumTablets tablets to avoid over loading,
- // if there is enough tablets with replication lag < highReplicationLagMinServing.
- // Pull the current replication lag for a stable sort.
+
+ // We want to return at least minNumTablets tablets to avoid overloading,
+ // as long as there are enough tablets with replication lag < highReplicationLagMinServing.
+
+ // Save the current replication lag for a stable sort.
snapshots := make([]tabletLagSnapshot, 0, len(list))
for _, ts := range list {
if !IsReplicationLagVeryHigh(ts) {
diff --git a/go/vt/discovery/tablet_health_check.go b/go/vt/discovery/tablet_health_check.go
index f0ad9b0a2ac..05ab47dee05 100644
--- a/go/vt/discovery/tablet_health_check.go
+++ b/go/vt/discovery/tablet_health_check.go
@@ -19,6 +19,7 @@ package discovery
import (
"context"
"fmt"
+ "net"
"strings"
"sync"
"time"
@@ -34,12 +35,16 @@ import (
"vitess.io/vitess/go/vt/vttablet/queryservice"
"vitess.io/vitess/go/vt/vttablet/tabletconn"
+ "google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/proto/topodata"
)
+// withDialerContextOnce ensures grpc.WithContextDialer() is added to the dial options at most once.
+var withDialerContextOnce sync.Once
+
// tabletHealthCheck maintains the health status of a tablet. A map of this
// structure is maintained in HealthCheck.
type tabletHealthCheck struct {
@@ -123,8 +128,8 @@ func (thc *tabletHealthCheck) setServingState(serving bool, reason string) {
}
// stream streams healthcheck responses to callback.
-func (thc *tabletHealthCheck) stream(ctx context.Context, callback func(*query.StreamHealthResponse) error) error {
- conn := thc.Connection()
+func (thc *tabletHealthCheck) stream(ctx context.Context, hc *HealthCheckImpl, callback func(*query.StreamHealthResponse) error) error {
+ conn := thc.Connection(hc)
if conn == nil {
// This signals the caller to retry
return nil
@@ -137,14 +142,34 @@ func (thc *tabletHealthCheck) stream(ctx context.Context, callback func(*query.S
return err
}
-func (thc *tabletHealthCheck) Connection() queryservice.QueryService {
+func (thc *tabletHealthCheck) Connection(hc *HealthCheckImpl) queryservice.QueryService {
thc.connMu.Lock()
defer thc.connMu.Unlock()
- return thc.connectionLocked()
+ return thc.connectionLocked(hc)
+}
+
+func healthCheckDialerFactory(hc *HealthCheckImpl) func(ctx context.Context, addr string) (net.Conn, error) {
+ return func(ctx context.Context, addr string) (net.Conn, error) {
+ // Limit the number of healthcheck connections opened in parallel to avoid high OS-thread
+ // usage due to blocking networking syscalls (e.g. DNS lookups, TCP connection opens,
+ // etc). Without this limit it is possible for vtgates watching >10k tablets to hit
+ // the panic: 'runtime: program exceeds 10000-thread limit'.
+ if err := hc.healthCheckDialSem.Acquire(ctx, 1); err != nil {
+ return nil, err
+ }
+ defer hc.healthCheckDialSem.Release(1)
+ var dialer net.Dialer
+ return dialer.DialContext(ctx, "tcp", addr)
+ }
}
-func (thc *tabletHealthCheck) connectionLocked() queryservice.QueryService {
+func (thc *tabletHealthCheck) connectionLocked(hc *HealthCheckImpl) queryservice.QueryService {
if thc.Conn == nil {
+ withDialerContextOnce.Do(func() {
+ grpcclient.RegisterGRPCDialOptions(func(opts []grpc.DialOption) ([]grpc.DialOption, error) {
+ return append(opts, grpc.WithContextDialer(healthCheckDialerFactory(hc))), nil
+ })
+ })
conn, err := tabletconn.GetDialer()(thc.Tablet, grpcclient.FailFast(true))
if err != nil {
thc.LastError = err
@@ -273,7 +298,7 @@ func (thc *tabletHealthCheck) checkConn(hc *HealthCheckImpl) {
}()
// Read stream health responses.
- err := thc.stream(streamCtx, func(shr *query.StreamHealthResponse) error {
+ err := thc.stream(streamCtx, hc, func(shr *query.StreamHealthResponse) error {
// We received a message. Reset the back-off.
retryDelay = hc.retryDelay
// Don't block on send to avoid deadlocks.
diff --git a/go/vt/discovery/tablet_picker.go b/go/vt/discovery/tablet_picker.go
index cb0449c6191..dcf4e05da00 100644
--- a/go/vt/discovery/tablet_picker.go
+++ b/go/vt/discovery/tablet_picker.go
@@ -17,7 +17,9 @@ limitations under the License.
package discovery
import (
+ "context"
"fmt"
+ "io"
"math/rand"
"sort"
"strings"
@@ -25,20 +27,35 @@ import (
"time"
"vitess.io/vitess/go/stats"
-
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+)
- "vitess.io/vitess/go/vt/vttablet/tabletconn"
+type TabletPickerCellPreference int
- "vitess.io/vitess/go/vt/log"
+const (
+ // PreferLocalWithAlias gives preference to the local cell first, then specified cells, if any.
+ // This is the default when no other option is provided.
+ TabletPickerCellPreference_PreferLocalWithAlias TabletPickerCellPreference = iota
+ // OnlySpecified only picks tablets from the list of cells given.
+ TabletPickerCellPreference_OnlySpecified
+)
- "context"
+type TabletPickerTabletOrder int
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/vterrors"
+const (
+ // All provided tablet types are given equal priority. This is the default.
+ TabletPickerTabletOrder_Any TabletPickerTabletOrder = iota
+ // Provided tablet types are expected to be prioritized in the given order.
+ TabletPickerTabletOrder_InOrder
)
var (
@@ -46,6 +63,16 @@ var (
muTabletPickerRetryDelay sync.Mutex
globalTPStats *tabletPickerStats
inOrderHint = "in_order:"
+
+ tabletPickerCellPreferenceMap = map[string]TabletPickerCellPreference{
+ "preferlocalwithalias": TabletPickerCellPreference_PreferLocalWithAlias,
+ "onlyspecified": TabletPickerCellPreference_OnlySpecified,
+ }
+
+ tabletPickerTabletOrderMap = map[string]TabletPickerTabletOrder{
+ "any": TabletPickerTabletOrder_Any,
+ "inorder": TabletPickerTabletOrder_InOrder,
+ }
)
// GetTabletPickerRetryDelay synchronizes changes to tabletPickerRetryDelay. Used in tests only at the moment
@@ -62,18 +89,66 @@ func SetTabletPickerRetryDelay(delay time.Duration) {
tabletPickerRetryDelay = delay
}
+type TabletPickerOptions struct {
+ CellPreference string
+ TabletOrder string
+}
+
+func parseTabletPickerCellPreferenceString(str string) (TabletPickerCellPreference, error) {
+ // return default if blank
+ if str == "" {
+ return TabletPickerCellPreference_PreferLocalWithAlias, nil
+ }
+
+ if c, ok := tabletPickerCellPreferenceMap[strings.ToLower(str)]; ok {
+ return c, nil
+ }
+
+ return -1, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid cell preference: %v", str)
+}
+
+func parseTabletPickerTabletOrderString(str string) (TabletPickerTabletOrder, error) {
+ // return default if blank
+ if str == "" {
+ return TabletPickerTabletOrder_Any, nil
+ }
+
+ if o, ok := tabletPickerTabletOrderMap[strings.ToLower(str)]; ok {
+ return o, nil
+ }
+
+ return -1, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid tablet order type: %v", str)
+}
+
+type localCellInfo struct {
+ localCell string
+ cellsInAlias map[string]string
+}
+
// TabletPicker gives a simplified API for picking tablets.
type TabletPicker struct {
- ts *topo.Server
- cells []string
- keyspace string
- shard string
- tabletTypes []topodatapb.TabletType
- inOrder bool
+ ts *topo.Server
+ cells []string
+ keyspace string
+ shard string
+ tabletTypes []topodatapb.TabletType
+ inOrder bool
+ cellPref TabletPickerCellPreference
+ localCellInfo localCellInfo
+ // This map is keyed on the results of TabletAlias.String().
+ ignoreTablets map[string]struct{}
}
// NewTabletPicker returns a TabletPicker.
-func NewTabletPicker(ts *topo.Server, cells []string, keyspace, shard, tabletTypesStr string) (*TabletPicker, error) {
+func NewTabletPicker(
+ ctx context.Context,
+ ts *topo.Server,
+ cells []string,
+ localCell, keyspace, shard, tabletTypesStr string,
+ options TabletPickerOptions,
+ ignoreTablets ...*topodatapb.TabletAlias,
+) (*TabletPicker, error) {
+ // Keep inOrder parsing here for backward compatibility until TabletPickerTabletOrder is fully adopted.
tabletTypes, inOrder, err := ParseTabletTypesAndOrder(tabletTypesStr)
if err != nil {
return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "failed to parse list of tablet types: %v", tabletTypesStr)
@@ -92,23 +167,134 @@ func NewTabletPicker(ts *topo.Server, cells []string, keyspace, shard, tabletTyp
return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION,
fmt.Sprintf("Missing required field(s) for tablet picker: %s", strings.Join(missingFields, ", ")))
}
- return &TabletPicker{
- ts: ts,
- cells: cells,
- keyspace: keyspace,
- shard: shard,
- tabletTypes: tabletTypes,
- inOrder: inOrder,
- }, nil
+
+ // Resolve tablet picker options
+ cellPref, err := parseTabletPickerCellPreferenceString(options.CellPreference)
+ if err != nil {
+ return nil, err
+ }
+
+ // For backward compatibility only parse the options for tablet ordering
+ // if the in_order hint wasn't already specified. Otherwise it could be overridden.
+ // We can remove this check once the in_order hint is deprecated.
+ if !inOrder {
+ order, err := parseTabletPickerTabletOrderString(options.TabletOrder)
+ if err != nil {
+ return nil, err
+ }
+ switch order {
+ case TabletPickerTabletOrder_Any:
+ inOrder = false
+ case TabletPickerTabletOrder_InOrder:
+ inOrder = true
+ }
+ }
+
+ aliasCellMap := make(map[string]string)
+ if cellPref == TabletPickerCellPreference_PreferLocalWithAlias {
+ if localCell == "" {
+ return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot have local cell preference without local cell")
+ }
+
+ // Add local cell to the list of cells for tablet picking.
+ // This will be de-duped later if the local cell already exists in the original list - see: dedupeCells()
+ cells = append(cells, localCell)
+ aliasName := topo.GetAliasByCell(ctx, ts, localCell)
+
+ // If an alias exists
+ if aliasName != localCell {
+ alias, err := ts.GetCellsAlias(ctx, aliasName, false)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "error fetching local cell alias")
+ }
+
+ // Add the aliasName to the list of cells for tablet picking.
+ cells = append(cells, aliasName)
+
+ // Create a map of the cells in the alias to make lookup faster later when we're giving preference to these.
+ // see prioritizeTablets()
+ for _, c := range alias.Cells {
+ aliasCellMap[c] = c
+ }
+ }
+ }
+
+ tp := &TabletPicker{
+ ts: ts,
+ cells: dedupeCells(cells),
+ localCellInfo: localCellInfo{localCell: localCell, cellsInAlias: aliasCellMap},
+ keyspace: keyspace,
+ shard: shard,
+ tabletTypes: tabletTypes,
+ inOrder: inOrder,
+ cellPref: cellPref,
+ ignoreTablets: make(map[string]struct{}, len(ignoreTablets)),
+ }
+
+ for _, ignoreTablet := range ignoreTablets {
+ tp.ignoreTablets[ignoreTablet.String()] = struct{}{}
+ }
+
+ return tp, nil
+
}
-// PickForStreaming picks an available tablet.
-// All tablets that belong to tp.cells are evaluated and one is
-// chosen at random.
+// dedupeCells removes duplicates from the cell list, which can occur when a passed-in cell
+// also exists in the local cell's alias. Can happen if CellPreference is PreferLocalWithAlias.
+func dedupeCells(cells []string) []string {
+ keys := make(map[string]bool)
+ dedupedCells := []string{}
+
+ for _, c := range cells {
+ if _, value := keys[c]; !value {
+ keys[c] = true
+ dedupedCells = append(dedupedCells, c)
+ }
+ }
+ return dedupedCells
+}
+
+// prioritizeTablets orders the candidate pool of tablets based on CellPreference.
+// If CellPreference is PreferLocalWithAlias then tablets in the local cell will be prioritized for selection,
+// followed by the tablets within the local cell's alias, and finally any others specified by the client.
+// If CellPreference is OnlySpecified, then tablets will only be selected randomly from the cells specified by the client.
+func (tp *TabletPicker) prioritizeTablets(candidates []*topo.TabletInfo) (sameCell, sameAlias, allOthers []*topo.TabletInfo) {
+ for _, c := range candidates {
+ if c.Alias.Cell == tp.localCellInfo.localCell {
+ sameCell = append(sameCell, c)
+ } else if _, ok := tp.localCellInfo.cellsInAlias[c.Alias.Cell]; ok {
+ sameAlias = append(sameAlias, c)
+ } else {
+ allOthers = append(allOthers, c)
+ }
+ }
+
+ return sameCell, sameAlias, allOthers
+}
+
+func (tp *TabletPicker) orderByTabletType(candidates []*topo.TabletInfo) []*topo.TabletInfo {
+ // Sort candidates slice such that tablets appear in same tablet type order as in tp.tabletTypes
+ orderMap := map[topodatapb.TabletType]int{}
+ for i, t := range tp.tabletTypes {
+ orderMap[t] = i
+ }
+ sort.Slice(candidates, func(i, j int) bool {
+ if orderMap[candidates[i].Type] == orderMap[candidates[j].Type] {
+ // identical tablet types: randomize order of tablets for this type
+ return rand.Intn(2) == 0 // 50% chance
+ }
+ return orderMap[candidates[i].Type] < orderMap[candidates[j].Type]
+ })
+
+ return candidates
+}
+
+// PickForStreaming picks a tablet that is healthy and serving.
+// Selection is based on CellPreference.
+// See prioritizeTablets for prioritization logic.
func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Tablet, error) {
- rand.Seed(time.Now().UnixNano())
- // keep trying at intervals (tabletPickerRetryDelay) until a tablet is found
- // or the context is canceled
+ // Keep trying at intervals (tabletPickerRetryDelay) until a healthy
+ // serving tablet is found or the context is cancelled.
for {
select {
case <-ctx.Done():
@@ -116,29 +302,40 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table
default:
}
candidates := tp.GetMatchingTablets(ctx)
- if tp.inOrder {
- // Sort candidates slice such that tablets appear in same tablet type order as in tp.tabletTypes
- orderMap := map[topodatapb.TabletType]int{}
- for i, t := range tp.tabletTypes {
- orderMap[t] = i
+ if tp.cellPref == TabletPickerCellPreference_PreferLocalWithAlias {
+ sameCellCandidates, sameAliasCandidates, allOtherCandidates := tp.prioritizeTablets(candidates)
+
+ if tp.inOrder {
+ sameCellCandidates = tp.orderByTabletType(sameCellCandidates)
+ sameAliasCandidates = tp.orderByTabletType(sameAliasCandidates)
+ allOtherCandidates = tp.orderByTabletType(allOtherCandidates)
+ } else {
+ // Randomize candidates
+ rand.Shuffle(len(sameCellCandidates), func(i, j int) {
+ sameCellCandidates[i], sameCellCandidates[j] = sameCellCandidates[j], sameCellCandidates[i]
+ })
+ rand.Shuffle(len(sameAliasCandidates), func(i, j int) {
+ sameAliasCandidates[i], sameAliasCandidates[j] = sameAliasCandidates[j], sameAliasCandidates[i]
+ })
+ rand.Shuffle(len(allOtherCandidates), func(i, j int) {
+ allOtherCandidates[i], allOtherCandidates[j] = allOtherCandidates[j], allOtherCandidates[i]
+ })
}
- sort.Slice(candidates, func(i, j int) bool {
- if orderMap[candidates[i].Type] == orderMap[candidates[j].Type] {
- // identical tablet types: randomize order of tablets for this type
- return rand.Intn(2) == 0 // 50% chance
- }
- return orderMap[candidates[i].Type] < orderMap[candidates[j].Type]
- })
+
+ candidates = append(sameCellCandidates, sameAliasCandidates...)
+ candidates = append(candidates, allOtherCandidates...)
+ } else if tp.inOrder {
+ candidates = tp.orderByTabletType(candidates)
} else {
- // Randomize candidates
+ // Randomize candidates.
rand.Shuffle(len(candidates), func(i, j int) {
candidates[i], candidates[j] = candidates[j], candidates[i]
})
}
if len(candidates) == 0 {
- // if no candidates were found, sleep and try again
+ // If no viable candidates were found, sleep and try again.
tp.incNoTabletFoundStat()
- log.Infof("No tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds",
+ log.Infof("No healthy serving tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds.",
tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, float64(GetTabletPickerRetryDelay().Milliseconds())/1000.0)
timer := time.NewTimer(GetTabletPickerRetryDelay())
select {
@@ -149,72 +346,67 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table
}
continue
}
- for _, ti := range candidates {
- // try to connect to tablet
- if conn, err := tabletconn.GetDialer()(ti.Tablet, true); err == nil {
- // OK to use ctx here because it is not actually used by the underlying Close implementation
- _ = conn.Close(ctx)
- log.Infof("tablet picker found tablet %s", ti.Tablet.String())
- return ti.Tablet, nil
- }
- // err found
- log.Warningf("unable to connect to tablet for alias %v", ti.Alias)
- }
- // Got here? Means we iterated all tablets and did not find a healthy one
- tp.incNoTabletFoundStat()
+ log.Infof("Tablet picker found a healthy serving tablet for streaming: %s", candidates[0].Tablet.String())
+ return candidates[0].Tablet, nil
}
}
-// GetMatchingTablets returns a list of TabletInfo for tablets
-// that match the cells, keyspace, shard and tabletTypes for this TabletPicker
+// GetMatchingTablets returns a list of TabletInfo for healthy
+// serving tablets that match the cells, keyspace, shard and
+// tabletTypes for this TabletPicker.
func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletInfo {
- // Special handling for PRIMARY tablet type
- // Since there is only one primary, we ignore cell and find the primary
+ // Special handling for PRIMARY tablet type: since there is only
+ // one primary per shard, we ignore cell and find the primary.
aliases := make([]*topodatapb.TabletAlias, 0)
if len(tp.tabletTypes) == 1 && tp.tabletTypes[0] == topodatapb.TabletType_PRIMARY {
shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
si, err := tp.ts.GetShard(shortCtx, tp.keyspace, tp.shard)
if err != nil {
- log.Errorf("error getting shard %s/%s: %s", tp.keyspace, tp.shard, err.Error())
+ log.Errorf("Error getting shard %s/%s: %v", tp.keyspace, tp.shard, err)
return nil
}
- aliases = append(aliases, si.PrimaryAlias)
+ if _, ignore := tp.ignoreTablets[si.PrimaryAlias.String()]; !ignore {
+ aliases = append(aliases, si.PrimaryAlias)
+ }
} else {
actualCells := make([]string, 0)
for _, cell := range tp.cells {
- // check if cell is actually an alias
- // non-blocking read so that this is fast
+ // Check if cell is actually an alias; using a
+ // non-blocking read so that this is fast.
shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
_, err := tp.ts.GetCellInfo(shortCtx, cell, false)
if err != nil {
- // not a valid cell, check whether it is a cell alias
+ // Not a valid cell, check whether it is a cell alias...
shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
alias, err := tp.ts.GetCellsAlias(shortCtx, cell, false)
- // if we get an error, either cellAlias doesn't exist or it isn't a cell alias at all. Ignore and continue
+ // If we get an error, either cellAlias doesn't exist or
+ // it isn't a cell alias at all; ignore and continue.
if err == nil {
actualCells = append(actualCells, alias.Cells...)
} else {
log.Infof("Unable to resolve cell %s, ignoring", cell)
}
} else {
- // valid cell, add it to our list
+ // Valid cell, add it to our list.
actualCells = append(actualCells, cell)
}
}
+
for _, cell := range actualCells {
shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
- // match cell, keyspace and shard
+ // Match cell, keyspace, and shard.
sri, err := tp.ts.GetShardReplication(shortCtx, cell, tp.keyspace, tp.shard)
if err != nil {
continue
}
-
for _, node := range sri.Nodes {
- aliases = append(aliases, node.TabletAlias)
+ if _, ignore := tp.ignoreTablets[node.TabletAlias.String()]; !ignore {
+ aliases = append(aliases, node.TabletAlias)
+ }
}
}
}
@@ -222,33 +414,47 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn
if len(aliases) == 0 {
return nil
}
+
shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer cancel()
- tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases)
+ tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases, nil)
if err != nil {
- log.Warningf("error fetching tablets from topo: %v", err)
- // If we get a partial result we can still use it, otherwise return
+ log.Warningf("Error fetching tablets from topo: %v", err)
+ // If we get a partial result we can still use it, otherwise return.
if len(tabletMap) == 0 {
return nil
}
}
+
tablets := make([]*topo.TabletInfo, 0, len(aliases))
for _, tabletAlias := range aliases {
tabletInfo, ok := tabletMap[topoproto.TabletAliasString(tabletAlias)]
if !ok {
- // Either tablet disappeared on us, or we got a partial result (GetTabletMap ignores
- // topo.ErrNoNode). Just log a warning
- log.Warningf("failed to load tablet %v", tabletAlias)
+ // Either tablet disappeared on us, or we got a partial result
+ // (GetTabletMap ignores topo.ErrNoNode); just log a warning.
+ log.Warningf("Tablet picker failed to load tablet %v", tabletAlias)
} else if topoproto.IsTypeInList(tabletInfo.Type, tp.tabletTypes) {
- tablets = append(tablets, tabletInfo)
+ // Try to connect to the tablet and confirm that it's usable.
+ if conn, err := tabletconn.GetDialer()(tabletInfo.Tablet, grpcclient.FailFast(true)); err == nil {
+ // Ensure that the tablet is healthy and serving.
+ shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
+ defer cancel()
+ if err := conn.StreamHealth(shortCtx, func(shr *querypb.StreamHealthResponse) error {
+ if shr != nil && shr.Serving && shr.RealtimeStats != nil && shr.RealtimeStats.HealthError == "" {
+ return io.EOF // End the stream
+ }
+ return vterrors.New(vtrpcpb.Code_INTERNAL, "tablet is not healthy and serving")
+ }); err == nil || err == io.EOF {
+ tablets = append(tablets, tabletInfo)
+ }
+ _ = conn.Close(ctx)
+ }
}
}
return tablets
}
func init() {
- // TODO(sougou): consolidate this call to be once per process.
- rand.Seed(time.Now().UnixNano())
globalTPStats = newTabletPickerStats()
}
diff --git a/go/vt/discovery/tablet_picker_test.go b/go/vt/discovery/tablet_picker_test.go
index ed071af13ad..91b936303df 100644
--- a/go/vt/discovery/tablet_picker_test.go
+++ b/go/vt/discovery/tablet_picker_test.go
@@ -1,12 +1,9 @@
/*
Copyright 2019 The Vitess Authors.
-
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
http://www.apache.org/licenses/LICENSE-2.0
-
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -25,174 +22,12 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
- querypb "vitess.io/vitess/go/vt/proto/query"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
-)
-
-func TestPickSimple(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell"})
- want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica")
- require.NoError(t, err)
-
- tablet, err := tp.PickForStreaming(context.Background())
- require.NoError(t, err)
- assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want)
-}
-
-func TestPickFromTwoHealthy(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell"})
- want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true)
- defer deleteTablet(t, te, want2)
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly")
- require.NoError(t, err)
-
- // In 20 attempts, both tablet types must be picked at least once.
- var picked1, picked2 bool
- for i := 0; i < 20; i++ {
- tablet, err := tp.PickForStreaming(context.Background())
- require.NoError(t, err)
- if proto.Equal(tablet, want1) {
- picked1 = true
- }
- if proto.Equal(tablet, want2) {
- picked2 = true
- }
- }
- assert.True(t, picked1)
- assert.True(t, picked2)
-}
-
-func TestPickInOrder1(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell"})
- want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true)
- defer deleteTablet(t, te, want2)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "in_order:replica,rdonly")
- require.NoError(t, err)
-
- // In 20 attempts, we always pick the first healthy tablet in order
- var picked1, picked2 bool
- for i := 0; i < 20; i++ {
- tablet, err := tp.PickForStreaming(context.Background())
- require.NoError(t, err)
- if proto.Equal(tablet, want1) {
- picked1 = true
- }
- if proto.Equal(tablet, want2) {
- picked2 = true
- }
- }
- assert.True(t, picked1)
- assert.False(t, picked2)
-}
-
-func TestPickInOrder2(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell"})
- want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true)
- defer deleteTablet(t, te, want2)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "in_order:rdonly,replica")
- require.NoError(t, err)
-
- // In 20 attempts, we always pick the first healthy tablet in order
- var picked1, picked2 bool
- for i := 0; i < 20; i++ {
- tablet, err := tp.PickForStreaming(context.Background())
- require.NoError(t, err)
- if proto.Equal(tablet, want1) {
- picked1 = true
- }
- if proto.Equal(tablet, want2) {
- picked2 = true
- }
- }
- assert.False(t, picked1)
- assert.True(t, picked2)
-}
-
-func TestPickInOrderMultipleInGroup(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell"})
- want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true)
- defer deleteTablet(t, te, want2)
- want3 := addTablet(te, 102, topodatapb.TabletType_RDONLY, "cell", true, true)
- defer deleteTablet(t, te, want3)
- want4 := addTablet(te, 103, topodatapb.TabletType_RDONLY, "cell", true, true)
- defer deleteTablet(t, te, want4)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "in_order:rdonly,replica")
- require.NoError(t, err)
-
- // In 40 attempts, we pick each of the three RDONLY, but never the REPLICA
- var picked1, picked2, picked3, picked4 bool
- for i := 0; i < 40; i++ {
- tablet, err := tp.PickForStreaming(context.Background())
- require.NoError(t, err)
- if proto.Equal(tablet, want1) {
- picked1 = true
- }
- if proto.Equal(tablet, want2) {
- picked2 = true
- }
- if proto.Equal(tablet, want3) {
- picked3 = true
- }
- if proto.Equal(tablet, want4) {
- picked4 = true
- }
- }
- assert.False(t, picked1)
- assert.True(t, picked2)
- assert.True(t, picked3)
- assert.True(t, picked4)
-}
-
-func TestPickRespectsTabletType(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell"})
- want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want)
- dont := addTablet(te, 101, topodatapb.TabletType_PRIMARY, "cell", true, true)
- defer deleteTablet(t, te, dont)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly")
- require.NoError(t, err)
-
- // In 20 attempts, primary tablet must be never picked
- for i := 0; i < 20; i++ {
- tablet, err := tp.PickForStreaming(context.Background())
- require.NoError(t, err)
- require.NotNil(t, tablet)
- require.True(t, proto.Equal(tablet, want), "picked wrong tablet type")
- }
-}
-
-func TestPickMultiCell(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell", "otherCell"})
- want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica")
- require.NoError(t, err)
-
- ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
- defer cancel()
- tablet, err := tp.PickForStreaming(ctx)
- require.NoError(t, err)
- assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want)
-}
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
func TestPickPrimary(t *testing.T) {
te := newPickerTestEnv(t, []string{"cell", "otherCell"})
@@ -206,7 +41,7 @@ func TestPickPrimary(t *testing.T) {
})
require.NoError(t, err)
- tp, err := NewTabletPicker(te.topoServ, []string{"otherCell"}, te.keyspace, te.shard, "primary")
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"otherCell"}, "cell", te.keyspace, te.shard, "primary", TabletPickerOptions{})
require.NoError(t, err)
ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond)
@@ -216,38 +51,278 @@ func TestPickPrimary(t *testing.T) {
assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want)
}
-func TestPickFromOtherCell(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell", "otherCell"})
- want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "otherCell", true, true)
- defer deleteTablet(t, te, want)
+func TestPickLocalPreferences(t *testing.T) {
+ type tablet struct {
+ id uint32
+ typ topodatapb.TabletType
+ cell string
+ }
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica")
- require.NoError(t, err)
+ type testCase struct {
+ name string
- ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
- defer cancel()
- tablet, err := tp.PickForStreaming(ctx)
- require.NoError(t, err)
- assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want)
+ //inputs
+ tablets []tablet
+ envCells []string
+ inCells []string
+ localCell string
+ inTabletTypes string
+ options TabletPickerOptions
+
+ //expected
+ tpCells []string
+ wantTablets []uint32
+ }
+
+ tcases := []testCase{
+ {
+ name: "pick simple",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "pick from two healthy",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "replica,rdonly",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{100, 101},
+ }, {
+ name: "pick in order replica",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "in_order:replica,rdonly",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "pick in order rdonly",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "in_order:rdonly,replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{101},
+ }, {
+ name: "pick in order multiple in group",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "cell"},
+ {102, topodatapb.TabletType_RDONLY, "cell"},
+ {103, topodatapb.TabletType_RDONLY, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "in_order:rdonly,replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{101, 102, 103},
+ }, {
+ // Same test as above, except the in order preference is passed via the new TabletPickerOptions param.
+ // This will replace the above test when we deprecate the "in_order" hint in the tabletTypeStr
+ name: "pick in order multiple in group with new picker option",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "cell"},
+ {102, topodatapb.TabletType_RDONLY, "cell"},
+ {103, topodatapb.TabletType_RDONLY, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "rdonly,replica",
+ options: TabletPickerOptions{TabletOrder: "InOrder"},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{101, 102, 103},
+ }, {
+ name: "picker respects tablet type",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_PRIMARY, "cell"},
+ },
+ envCells: []string{"cell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "replica,rdonly",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "pick multi cell",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell", "otherCell"},
+ localCell: "cell",
+ inTabletTypes: "replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "otherCell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "pick from other cell",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "otherCell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell", "otherCell"},
+ localCell: "cell",
+ inTabletTypes: "replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "otherCell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "don't pick from other cell",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_REPLICA, "otherCell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell"},
+ localCell: "cell",
+ inTabletTypes: "replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "multi cell two tablets, local preference default",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_REPLICA, "otherCell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell", "otherCell"},
+ localCell: "cell",
+ inTabletTypes: "replica",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "otherCell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "multi cell two tablets, only specified cells",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_REPLICA, "otherCell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell", "otherCell"},
+ localCell: "cell",
+ inTabletTypes: "replica",
+ options: TabletPickerOptions{CellPreference: "OnlySpecified"},
+ tpCells: []string{"cell", "otherCell"},
+ wantTablets: []uint32{100, 101},
+ }, {
+ name: "multi cell two tablet types, local preference default",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "otherCell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell", "otherCell"},
+ localCell: "cell",
+ inTabletTypes: "replica,rdonly",
+ options: TabletPickerOptions{},
+ tpCells: []string{"cell", "otherCell", "cella"},
+ wantTablets: []uint32{100},
+ }, {
+ name: "multi cell two tablet types, only specified cells",
+ tablets: []tablet{
+ {100, topodatapb.TabletType_REPLICA, "cell"},
+ {101, topodatapb.TabletType_RDONLY, "otherCell"},
+ },
+ envCells: []string{"cell", "otherCell"},
+ inCells: []string{"cell", "otherCell"},
+ localCell: "cell",
+ inTabletTypes: "replica,rdonly",
+ options: TabletPickerOptions{CellPreference: "OnlySpecified"},
+ tpCells: []string{"cell", "otherCell"},
+ wantTablets: []uint32{100, 101},
+ },
+ }
+
+ ctx := context.Background()
+ for _, tcase := range tcases {
+ t.Run(tcase.name, func(t *testing.T) {
+ te := newPickerTestEnv(t, tcase.envCells)
+ var testTablets []*topodatapb.Tablet
+ for _, tab := range tcase.tablets {
+ testTablets = append(testTablets, addTablet(te, int(tab.id), tab.typ, tab.cell, true, true))
+ }
+ defer func() {
+ for _, tab := range testTablets {
+ deleteTablet(t, te, tab)
+ }
+ }()
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, tcase.inCells, tcase.localCell, te.keyspace, te.shard, tcase.inTabletTypes, tcase.options)
+ require.NoError(t, err)
+ require.Equal(t, tp.localCellInfo.localCell, tcase.localCell)
+ require.ElementsMatch(t, tp.cells, tcase.tpCells)
+
+ var selectedTablets []uint32
+ selectedTabletMap := make(map[uint32]bool)
+ for i := 0; i < 40; i++ {
+ tab, err := tp.PickForStreaming(ctx)
+ require.NoError(t, err)
+ selectedTabletMap[tab.Alias.Uid] = true
+ }
+ for uid := range selectedTabletMap {
+ selectedTablets = append(selectedTablets, uid)
+ }
+ require.ElementsMatch(t, selectedTablets, tcase.wantTablets)
+ })
+ }
}
-func TestDontPickFromOtherCell(t *testing.T) {
+func TestPickCellPreferenceLocalCell(t *testing.T) {
+ // test env puts all cells into an alias called "cella"
te := newPickerTestEnv(t, []string{"cell", "otherCell"})
want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true)
- defer deleteTablet(t, te, want2)
- tp, err := NewTabletPicker(te.topoServ, []string{"cell"}, te.keyspace, te.shard, "replica")
+ // Local cell preference is default
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{})
require.NoError(t, err)
- ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
- defer cancel()
+ ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel1()
+ tablet, err := tp.PickForStreaming(ctx1)
+ require.NoError(t, err)
+ assert.True(t, proto.Equal(want1, tablet), "Pick: %v, want %v", tablet, want1)
- // In 20 attempts, only want1 must be picked because TabletPicker.cells = "cell"
+ // create a tablet in the other cell
+ want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true)
+ defer deleteTablet(t, te, want2)
+
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel2()
+
+ // In 20 attempts, only tablet in "cell" will be picked because we give local cell priority by default
var picked1, picked2 bool
for i := 0; i < 20; i++ {
- tablet, err := tp.PickForStreaming(ctx)
+ tablet, err := tp.PickForStreaming(ctx2)
require.NoError(t, err)
if proto.Equal(tablet, want1) {
picked1 = true
@@ -260,71 +335,53 @@ func TestDontPickFromOtherCell(t *testing.T) {
assert.False(t, picked2)
}
-func TestPickMultiCellTwoTablets(t *testing.T) {
+func TestPickCellPreferenceLocalAlias(t *testing.T) {
+ // test env puts all cells into an alias called "cella"
te := newPickerTestEnv(t, []string{"cell", "otherCell"})
- want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true)
- defer deleteTablet(t, te, want2)
-
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica")
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{})
require.NoError(t, err)
+ // create a tablet in the other cell, it should be picked
+ want := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true)
+ defer deleteTablet(t, te, want)
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel()
-
- // In 20 attempts, both tablet types must be picked at least once.
- var picked1, picked2 bool
- for i := 0; i < 20; i++ {
- tablet, err := tp.PickForStreaming(ctx)
- require.NoError(t, err)
- if proto.Equal(tablet, want1) {
- picked1 = true
- }
- if proto.Equal(tablet, want2) {
- picked2 = true
- }
- }
- assert.True(t, picked1)
- assert.True(t, picked2)
+ tablet, err := tp.PickForStreaming(ctx)
+ require.NoError(t, err)
+ assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want)
}
-func TestPickMultiCellTwoTabletTypes(t *testing.T) {
- te := newPickerTestEnv(t, []string{"cell", "otherCell"})
- want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
- defer deleteTablet(t, te, want1)
- want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "otherCell", true, true)
- defer deleteTablet(t, te, want2)
+func TestPickWithIgnoreList(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
+ defer cancel()
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly")
- require.NoError(t, err)
+ te := newPickerTestEnv(t, []string{"cell1", "cell2"})
- ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
- defer cancel()
+ want := addTablet(te, 101, topodatapb.TabletType_REPLICA, "cell1", true, true)
+ defer deleteTablet(t, te, want)
- // In 20 attempts, both tablet types must be picked at least once.
- var picked1, picked2 bool
- for i := 0; i < 20; i++ {
+ dontWant := addTablet(te, 102, topodatapb.TabletType_REPLICA, "cell1", true, true)
+ defer deleteTablet(t, te, dontWant)
+
+ // Specify the alias as the cell.
+ tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell1", te.keyspace, te.shard, "replica", TabletPickerOptions{}, dontWant.GetAlias())
+ require.NoError(t, err)
+
+ // Try it many times to be sure we don't ever pick from the ignore list.
+ for i := 0; i < 100; i++ {
tablet, err := tp.PickForStreaming(ctx)
require.NoError(t, err)
- if proto.Equal(tablet, want1) {
- picked1 = true
- }
- if proto.Equal(tablet, want2) {
- picked2 = true
- }
+ require.False(t, proto.Equal(dontWant, tablet), "Picked the tablet we shouldn't have: %v", dontWant)
}
- assert.True(t, picked1)
- assert.True(t, picked2)
}
-func TestPickUsingCellAlias(t *testing.T) {
+func TestPickUsingCellAliasOnlySpecified(t *testing.T) {
// test env puts all cells into an alias called "cella"
te := newPickerTestEnv(t, []string{"cell", "otherCell"})
want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true)
defer deleteTablet(t, te, want1)
- tp, err := NewTabletPicker(te.topoServ, []string{"cella"}, te.keyspace, te.shard, "replica")
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"})
require.NoError(t, err)
ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond)
@@ -348,7 +405,8 @@ func TestPickUsingCellAlias(t *testing.T) {
ctx3, cancel3 := context.WithTimeout(context.Background(), 200*time.Millisecond)
defer cancel3()
- // In 20 attempts, both tablet types must be picked at least once.
+ // In 20 attempts each of the tablets should get picked at least once.
+ // Local cell is not given preference
var picked1, picked2 bool
for i := 0; i < 20; i++ {
tablet, err := tp.PickForStreaming(ctx3)
@@ -366,7 +424,7 @@ func TestPickUsingCellAlias(t *testing.T) {
func TestTabletAppearsDuringSleep(t *testing.T) {
te := newPickerTestEnv(t, []string{"cell"})
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica")
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{})
require.NoError(t, err)
delay := GetTabletPickerRetryDelay()
@@ -392,12 +450,38 @@ func TestTabletAppearsDuringSleep(t *testing.T) {
assert.True(t, proto.Equal(want, got), "Pick: %v, want %v", got, want)
}
-func TestPickError(t *testing.T) {
+func TestPickErrorLocalPreferenceDefault(t *testing.T) {
te := newPickerTestEnv(t, []string{"cell"})
- _, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "badtype")
+ _, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "badtype", TabletPickerOptions{})
assert.EqualError(t, err, "failed to parse list of tablet types: badtype")
- tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica")
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{})
+ require.NoError(t, err)
+ delay := GetTabletPickerRetryDelay()
+ defer func() {
+ SetTabletPickerRetryDelay(delay)
+ }()
+ SetTabletPickerRetryDelay(11 * time.Millisecond)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
+ defer cancel()
+ // no tablets
+ _, err = tp.PickForStreaming(ctx)
+ require.EqualError(t, err, "context has expired")
+ // no tablets of the correct type
+ defer deleteTablet(t, te, addTablet(te, 200, topodatapb.TabletType_RDONLY, "cell", true, true))
+ ctx, cancel = context.WithTimeout(context.Background(), 20*time.Millisecond)
+ defer cancel()
+ _, err = tp.PickForStreaming(ctx)
+ require.EqualError(t, err, "context has expired")
+ // if local preference is selected, tp cells include's the local cell's alias
+ require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell_cella.ks.0.replica"], int64(0))
+}
+
+func TestPickErrorOnlySpecified(t *testing.T) {
+ te := newPickerTestEnv(t, []string{"cell"})
+
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"})
require.NoError(t, err)
delay := GetTabletPickerRetryDelay()
defer func() {
@@ -416,9 +500,49 @@ func TestPickError(t *testing.T) {
defer cancel()
_, err = tp.PickForStreaming(ctx)
require.EqualError(t, err, "context has expired")
+
require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell.ks.0.replica"], int64(0))
}
+// TestPickFallbackType tests that when providing a list of tablet types to
+// pick from, with the list in preference order, that when the primary/first
+// type has no available healthy serving tablets that we select a healthy
+// serving tablet from the secondary/second type.
+func TestPickFallbackType(t *testing.T) {
+ cells := []string{"cell1", "cell2"}
+ localCell := cells[0]
+ tabletTypes := "replica,primary"
+ options := TabletPickerOptions{
+ TabletOrder: "InOrder",
+ }
+ te := newPickerTestEnv(t, cells)
+
+ // This one should be selected even though it's the secondary type
+ // as it is healthy and serving.
+ primaryTablet := addTablet(te, 100, topodatapb.TabletType_PRIMARY, localCell, true, true)
+ defer deleteTablet(t, te, primaryTablet)
+
+ // Replica tablet should not be selected as it is unhealthy.
+ replicaTablet := addTablet(te, 200, topodatapb.TabletType_REPLICA, localCell, false, false)
+ defer deleteTablet(t, te, replicaTablet)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel()
+ _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = primaryTablet.Alias
+ return nil
+ })
+ require.NoError(t, err)
+
+ tp, err := NewTabletPicker(context.Background(), te.topoServ, cells, localCell, te.keyspace, te.shard, tabletTypes, options)
+ require.NoError(t, err)
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel2()
+ tablet, err := tp.PickForStreaming(ctx2)
+ require.NoError(t, err)
+ assert.True(t, proto.Equal(primaryTablet, tablet), "Pick: %v, want %v", tablet, primaryTablet)
+}
+
type pickerTestEnv struct {
t *testing.T
keyspace string
@@ -467,18 +591,21 @@ func addTablet(te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell
err := te.topoServ.CreateTablet(context.Background(), tablet)
require.NoError(te.t, err)
+ shr := &querypb.StreamHealthResponse{
+ Serving: serving,
+ Target: &querypb.Target{
+ Keyspace: te.keyspace,
+ Shard: te.shard,
+ TabletType: tabletType,
+ },
+ RealtimeStats: &querypb.RealtimeStats{HealthError: "tablet is unhealthy"},
+ }
if healthy {
- _ = createFixedHealthConn(tablet, &querypb.StreamHealthResponse{
- Serving: serving,
- Target: &querypb.Target{
- Keyspace: te.keyspace,
- Shard: te.shard,
- TabletType: tabletType,
- },
- RealtimeStats: &querypb.RealtimeStats{HealthError: ""},
- })
+ shr.RealtimeStats.HealthError = ""
}
+ _ = createFixedHealthConn(tablet, shr)
+
return tablet
}
diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go
index a2a70a1f2d0..76f051a456c 100644
--- a/go/vt/discovery/topology_watcher.go
+++ b/go/vt/discovery/topology_watcher.go
@@ -54,15 +54,15 @@ var (
"Operation", topologyWatcherOpListTablets, topologyWatcherOpGetTablet)
)
-// tabletInfo is used internally by the TopologyWatcher class
+// tabletInfo is used internally by the TopologyWatcher struct.
type tabletInfo struct {
alias string
tablet *topodata.Tablet
}
-// TopologyWatcher polls tablet from a configurable set of tablets
-// periodically. When tablets are added / removed, it calls
-// the LegacyTabletRecorder AddTablet / RemoveTablet interface appropriately.
+// TopologyWatcher polls the topology periodically for changes to
+// the set of tablets. When tablets are added / removed / modified,
+// it calls the AddTablet / RemoveTablet interface appropriately.
type TopologyWatcher struct {
// set at construction time
topoServer *topo.Server
@@ -71,8 +71,7 @@ type TopologyWatcher struct {
cell string
refreshInterval time.Duration
refreshKnownTablets bool
- getTablets func(tw *TopologyWatcher) ([]*topodata.TabletAlias, error)
- sem chan int
+ concurrency int64
ctx context.Context
cancelFunc context.CancelFunc
// wg keeps track of all launched Go routines.
@@ -80,49 +79,44 @@ type TopologyWatcher struct {
// mu protects all variables below
mu sync.Mutex
- // tablets contains a map of alias -> tabletInfo for all known tablets
+ // tablets contains a map of alias -> tabletInfo for all known tablets.
tablets map[string]*tabletInfo
- // topoChecksum stores a crc32 of the tablets map and is exported as a metric
+ // topoChecksum stores a crc32 of the tablets map and is exported as a metric.
topoChecksum uint32
- // lastRefresh records the timestamp of the last topo refresh
+ // lastRefresh records the timestamp of the last refresh of the topology.
lastRefresh time.Time
- // firstLoadDone is true when first load of the topology data is done.
+ // firstLoadDone is true when the initial load of the topology data is complete.
firstLoadDone bool
- // firstLoadChan is closed when the initial loading of topology data is done.
+ // firstLoadChan is closed when the initial load of topology data is complete.
firstLoadChan chan struct{}
}
// NewTopologyWatcher returns a TopologyWatcher that monitors all
-// the tablets in a cell, and starts refreshing.
-func NewTopologyWatcher(ctx context.Context, topoServer *topo.Server, hc HealthCheck, filter TabletFilter, cell string, refreshInterval time.Duration, refreshKnownTablets bool, topoReadConcurrency int, getTablets func(tw *TopologyWatcher) ([]*topodata.TabletAlias, error)) *TopologyWatcher {
+// the tablets in a cell, and reloads them as needed.
+func NewTopologyWatcher(ctx context.Context, topoServer *topo.Server, hc HealthCheck, f TabletFilter, cell string, refreshInterval time.Duration, refreshKnownTablets bool, topoReadConcurrency int64) *TopologyWatcher {
tw := &TopologyWatcher{
topoServer: topoServer,
healthcheck: hc,
- tabletFilter: filter,
+ tabletFilter: f,
cell: cell,
refreshInterval: refreshInterval,
refreshKnownTablets: refreshKnownTablets,
- getTablets: getTablets,
- sem: make(chan int, topoReadConcurrency),
+ concurrency: topoReadConcurrency,
tablets: make(map[string]*tabletInfo),
}
tw.firstLoadChan = make(chan struct{})
- // We want the span from the context, but not the cancelation that comes with it
+ // We want the span from the context, but not the cancellation that comes with it
spanContext := trace.CopySpan(context.Background(), ctx)
tw.ctx, tw.cancelFunc = context.WithCancel(spanContext)
return tw
}
-// NewCellTabletsWatcher returns a TopologyWatcher that monitors all
-// the tablets in a cell, and starts refreshing.
-func NewCellTabletsWatcher(ctx context.Context, topoServer *topo.Server, hc HealthCheck, f TabletFilter, cell string, refreshInterval time.Duration, refreshKnownTablets bool, topoReadConcurrency int) *TopologyWatcher {
- return NewTopologyWatcher(ctx, topoServer, hc, f, cell, refreshInterval, refreshKnownTablets, topoReadConcurrency, func(tw *TopologyWatcher) ([]*topodata.TabletAlias, error) {
- return tw.topoServer.GetTabletAliasesByCell(ctx, tw.cell)
- })
+func (tw *TopologyWatcher) getTablets() ([]*topo.TabletInfo, error) {
+ return tw.topoServer.GetTabletsByCell(tw.ctx, tw.cell, &topo.GetTabletsByCellOptions{Concurrency: tw.concurrency})
}
-// Start starts the topology watcher
+// Start starts the topology watcher.
func (tw *TopologyWatcher) Start() {
tw.wg.Add(1)
go func(t *TopologyWatcher) {
@@ -140,7 +134,7 @@ func (tw *TopologyWatcher) Start() {
}(tw)
}
-// Stop stops the watcher. It does not clean up the tablets added to LegacyTabletRecorder.
+// Stop stops the watcher. It does not clean up the tablets added to HealthCheck.
func (tw *TopologyWatcher) Stop() {
tw.cancelFunc()
// wait for watch goroutine to finish.
@@ -148,76 +142,55 @@ func (tw *TopologyWatcher) Stop() {
}
func (tw *TopologyWatcher) loadTablets() {
- var wg sync.WaitGroup
newTablets := make(map[string]*tabletInfo)
- // first get the list of relevant tabletAliases
- tabletAliases, err := tw.getTablets(tw)
+ // First get the list of all tablets.
+ tabletInfos, err := tw.getTablets()
topologyWatcherOperations.Add(topologyWatcherOpListTablets, 1)
if err != nil {
topologyWatcherErrors.Add(topologyWatcherOpListTablets, 1)
- select {
- case <-tw.ctx.Done():
+ // If we get a partial result error, we just log it and process the tablets that we did manage to fetch.
+ if topo.IsErrType(err, topo.PartialResult) {
+ log.Errorf("received partial result from getTablets for cell %v: %v", tw.cell, err)
+ } else { // For all other errors, just return.
+ log.Errorf("error getting tablets for cell: %v: %v", tw.cell, err)
return
- default:
}
-		log.Errorf("cannot get tablets for cell: %v: %v", tw.cell, err)
-		return
}
// Accumulate a list of all known alias strings to use later
- // when sorting
- tabletAliasStrs := make([]string, 0, len(tabletAliases))
+ // when sorting.
+ tabletAliasStrs := make([]string, 0, len(tabletInfos))
tw.mu.Lock()
- for _, tAlias := range tabletAliases {
- aliasStr := topoproto.TabletAliasString(tAlias)
+ defer tw.mu.Unlock()
+
+ for _, tInfo := range tabletInfos {
+ aliasStr := topoproto.TabletAliasString(tInfo.Alias)
tabletAliasStrs = append(tabletAliasStrs, aliasStr)
if !tw.refreshKnownTablets {
- // we already have a tabletInfo for this and the flag tells us to not refresh
+ // We already have a tabletInfo for this and the flag tells us to not refresh.
if val, ok := tw.tablets[aliasStr]; ok {
newTablets[aliasStr] = val
continue
}
}
-
- wg.Add(1)
- go func(alias *topodata.TabletAlias) {
- defer wg.Done()
- tw.sem <- 1 // Wait for active queue to drain.
- tablet, err := tw.topoServer.GetTablet(tw.ctx, alias)
- topologyWatcherOperations.Add(topologyWatcherOpGetTablet, 1)
- <-tw.sem // Done; enable next request to run
- if err != nil {
- topologyWatcherErrors.Add(topologyWatcherOpGetTablet, 1)
- select {
- case <-tw.ctx.Done():
- return
- default:
- }
- log.Errorf("cannot get tablet for alias %v: %v", alias, err)
- return
- }
- if !(tw.tabletFilter == nil || tw.tabletFilter.IsIncluded(tablet.Tablet)) {
- return
- }
- tw.mu.Lock()
- aliasStr := topoproto.TabletAliasString(alias)
- newTablets[aliasStr] = &tabletInfo{
- alias: aliasStr,
- tablet: tablet.Tablet,
- }
- tw.mu.Unlock()
- }(tAlias)
+ // There's no network call here, so we just do the tablets one at a time instead of in parallel goroutines.
+ newTablets[aliasStr] = &tabletInfo{
+ alias: aliasStr,
+ tablet: tInfo.Tablet,
+ }
}
- tw.mu.Unlock()
- wg.Wait()
- tw.mu.Lock()
-
for alias, newVal := range newTablets {
- // trust the alias from topo and add it if it doesn't exist
+ if tw.tabletFilter != nil && !tw.tabletFilter.IsIncluded(newVal.tablet) {
+ continue
+ }
+
+ // Trust the alias from topo and add it if it doesn't exist.
if val, ok := tw.tablets[alias]; ok {
// check if the host and port have changed. If yes, replace tablet.
oldKey := TabletToMapKey(val.tablet)
@@ -229,13 +202,17 @@ func (tw *TopologyWatcher) loadTablets() {
topologyWatcherOperations.Add(topologyWatcherOpReplaceTablet, 1)
}
} else {
- // This is a new tablet record, let's add it to the healthcheck
+ // This is a new tablet record, let's add it to the HealthCheck.
tw.healthcheck.AddTablet(newVal.tablet)
topologyWatcherOperations.Add(topologyWatcherOpAddTablet, 1)
}
}
for _, val := range tw.tablets {
+ if tw.tabletFilter != nil && !tw.tabletFilter.IsIncluded(val.tablet) {
+ continue
+ }
+
if _, ok := newTablets[val.alias]; !ok {
tw.healthcheck.RemoveTablet(val.tablet)
topologyWatcherOperations.Add(topologyWatcherOpRemoveTablet, 1)
@@ -247,8 +224,8 @@ func (tw *TopologyWatcher) loadTablets() {
close(tw.firstLoadChan)
}
- // iterate through the tablets in a stable order and compute a
- // checksum of the tablet map
+ // Iterate through the tablets in a stable order and compute a
+ // checksum of the tablet map.
sort.Strings(tabletAliasStrs)
var buf bytes.Buffer
for _, alias := range tabletAliasStrs {
@@ -260,11 +237,9 @@ func (tw *TopologyWatcher) loadTablets() {
tw.topoChecksum = crc32.ChecksumIEEE(buf.Bytes())
tw.lastRefresh = time.Now()
- tw.mu.Unlock()
-
}
-// RefreshLag returns the time since the last refresh
+// RefreshLag returns the time since the last refresh.
func (tw *TopologyWatcher) RefreshLag() time.Duration {
tw.mu.Lock()
defer tw.mu.Unlock()
@@ -272,7 +247,7 @@ func (tw *TopologyWatcher) RefreshLag() time.Duration {
return time.Since(tw.lastRefresh)
}
-// TopoChecksum returns the checksum of the current state of the topo
+// TopoChecksum returns the checksum of the current state of the topo.
func (tw *TopologyWatcher) TopoChecksum() uint32 {
tw.mu.Lock()
defer tw.mu.Unlock()
@@ -281,12 +256,25 @@ func (tw *TopologyWatcher) TopoChecksum() uint32 {
}
// TabletFilter is an interface that can be given to a TopologyWatcher
-// to be applied as an additional filter on the list of tablets returned by its getTablets function
+// to be applied as an additional filter on the list of tablets returned by its getTablets function.
type TabletFilter interface {
// IsIncluded returns whether tablet is included in this filter
IsIncluded(tablet *topodata.Tablet) bool
}
+// TabletFilters contains filters for tablets.
+type TabletFilters []TabletFilter
+
+// IsIncluded returns true if a tablet passes all filters.
+func (tf TabletFilters) IsIncluded(tablet *topodata.Tablet) bool {
+ for _, filter := range tf {
+ if !filter.IsIncluded(tablet) {
+ return false
+ }
+ }
+ return true
+}
+
// FilterByShard is a filter that filters tablets by
// keyspace/shard.
type FilterByShard struct {
@@ -295,18 +283,18 @@ type FilterByShard struct {
}
// filterShard describes a filter for a given shard or keyrange inside
-// a keyspace
+// a keyspace.
type filterShard struct {
keyspace string
shard string
keyRange *topodata.KeyRange // only set if shard is also a KeyRange
}
-// NewFilterByShard creates a new FilterByShard on top of an existing
-// LegacyTabletRecorder. Each filter is a keyspace|shard entry, where shard
+// NewFilterByShard creates a new FilterByShard for use by a
+// TopologyWatcher. Each filter is a keyspace|shard entry, where shard
// can either be a shard name, or a keyrange. All tablets that match
-// at least one keyspace|shard tuple will be forwarded to the
-// underlying LegacyTabletRecorder.
+// at least one keyspace|shard tuple will be forwarded by the
+// TopologyWatcher to its consumer.
func NewFilterByShard(filters []string) (*FilterByShard, error) {
m := make(map[string][]*filterShard)
for _, filter := range filters {
@@ -343,8 +331,7 @@ func NewFilterByShard(filters []string) (*FilterByShard, error) {
}, nil
}
-// IsIncluded returns true iff the tablet's keyspace and shard should be
-// forwarded to the underlying LegacyTabletRecorder.
+// IsIncluded returns true iff the tablet's keyspace and shard match what we have.
func (fbs *FilterByShard) IsIncluded(tablet *topodata.Tablet) bool {
canonical, kr, err := topo.ValidateShardName(tablet.Shard)
if err != nil {
@@ -365,15 +352,14 @@ func (fbs *FilterByShard) IsIncluded(tablet *topodata.Tablet) bool {
return false
}
-// FilterByKeyspace is a filter that filters tablets by
-// keyspace
+// FilterByKeyspace is a filter that filters tablets by keyspace.
type FilterByKeyspace struct {
keyspaces map[string]bool
}
// NewFilterByKeyspace creates a new FilterByKeyspace.
// Each filter is a keyspace entry. All tablets that match
-// a keyspace will be forwarded to the underlying LegacyTabletRecorder.
+// a keyspace will be forwarded to the TopologyWatcher's consumer.
func NewFilterByKeyspace(selectedKeyspaces []string) *FilterByKeyspace {
m := make(map[string]bool)
for _, keyspace := range selectedKeyspaces {
@@ -385,9 +371,37 @@ func NewFilterByKeyspace(selectedKeyspaces []string) *FilterByKeyspace {
}
}
-// IsIncluded returns true if the tablet's keyspace should be
-// forwarded to the underlying LegacyTabletRecorder.
+// IsIncluded returns true if the tablet's keyspace matches what we have.
func (fbk *FilterByKeyspace) IsIncluded(tablet *topodata.Tablet) bool {
_, exist := fbk.keyspaces[tablet.Keyspace]
return exist
}
+
+// FilterByTabletTags is a filter that filters tablets by tablet tag key/values.
+type FilterByTabletTags struct {
+ tags map[string]string
+}
+
+// NewFilterByTabletTags creates a new FilterByTabletTags. All tablets that match
+// all tablet tags will be forwarded to the TopologyWatcher's consumer.
+func NewFilterByTabletTags(tabletTags map[string]string) *FilterByTabletTags {
+ return &FilterByTabletTags{
+ tags: tabletTags,
+ }
+}
+
+// IsIncluded returns true if the tablet's tags match what we expect.
+func (fbtg *FilterByTabletTags) IsIncluded(tablet *topodata.Tablet) bool {
+ if fbtg.tags == nil {
+ return true
+ }
+ if tablet.Tags == nil {
+ return false
+ }
+ for key, val := range fbtg.tags {
+ if tabletVal, found := tablet.Tags[key]; !found || tabletVal != val {
+ return false
+ }
+ }
+ return true
+}
diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go
index 9d0876cf424..485bb23b09d 100644
--- a/go/vt/discovery/topology_watcher_test.go
+++ b/go/vt/discovery/topology_watcher_test.go
@@ -17,12 +17,13 @@ limitations under the License.
package discovery
import (
+ "context"
"math/rand"
"testing"
"time"
- "context"
-
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/vt/logutil"
@@ -44,26 +45,21 @@ func checkOpCounts(t *testing.T, prevCounts, deltas map[string]int64) map[string
newVal = 0
}
- if newVal != prevVal+delta {
- t.Errorf("expected %v to increase by %v, got %v -> %v", key, delta, prevVal, newVal)
- }
+ assert.Equal(t, newVal, prevVal+delta, "expected %v to increase by %v, got %v -> %v", key, delta, prevVal, newVal)
}
return newCounts
}
func checkChecksum(t *testing.T, tw *TopologyWatcher, want uint32) {
t.Helper()
- got := tw.TopoChecksum()
- if want != got {
- t.Errorf("want checksum %v got %v", want, got)
- }
+ assert.Equal(t, want, tw.TopoChecksum())
}
func TestStartAndCloseTopoWatcher(t *testing.T) {
ts := memorytopo.NewServer("aa")
fhc := NewFakeHealthCheck(nil)
topologyWatcherOperations.ZeroAll()
- tw := NewCellTabletsWatcher(context.Background(), ts, fhc, nil, "aa", 100*time.Microsecond, true, 5)
+ tw := NewTopologyWatcher(context.Background(), ts, fhc, nil, "aa", 100*time.Microsecond, true, 5)
done := make(chan bool, 3)
result := make(chan bool, 1)
@@ -100,9 +96,7 @@ func TestStartAndCloseTopoWatcher(t *testing.T) {
done <- true
_, ok := <-result
- if !ok {
- t.Fatal("timed out")
- }
+ require.True(t, ok, "timed out")
}
func TestCellTabletsWatcher(t *testing.T) {
@@ -115,11 +109,14 @@ func TestCellTabletsWatcherNoRefreshKnown(t *testing.T) {
func checkWatcher(t *testing.T, refreshKnownTablets bool) {
ts := memorytopo.NewServer("aa")
+ defer ts.Close()
fhc := NewFakeHealthCheck(nil)
+ defer fhc.Close()
+ filter := NewFilterByKeyspace([]string{"keyspace"})
logger := logutil.NewMemoryLogger()
topologyWatcherOperations.ZeroAll()
counts := topologyWatcherOperations.Counts()
- tw := NewCellTabletsWatcher(context.Background(), ts, fhc, nil, "aa", 10*time.Minute, refreshKnownTablets, 5)
+ tw := NewTopologyWatcher(context.Background(), ts, fhc, filter, "aa", 10*time.Minute, refreshKnownTablets, 5)
counts = checkOpCounts(t, counts, map[string]int64{})
checkChecksum(t, tw, 0)
@@ -137,19 +134,18 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) {
Keyspace: "keyspace",
Shard: "shard",
}
- if err := ts.CreateTablet(context.Background(), tablet); err != nil {
- t.Fatalf("CreateTablet failed: %v", err)
- }
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet), "CreateTablet failed for %v", tablet.Alias)
+
tw.loadTablets()
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "AddTablet": 1})
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 1})
checkChecksum(t, tw, 3238442862)
// Check the tablet is returned by GetAllTablets().
allTablets := fhc.GetAllTablets()
key := TabletToMapKey(tablet)
- if _, ok := allTablets[key]; !ok || len(allTablets) != 1 || !proto.Equal(allTablets[key], tablet) {
- t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet)
- }
+ assert.Len(t, allTablets, 1)
+ assert.Contains(t, allTablets, key)
+ assert.True(t, proto.Equal(tablet, allTablets[key]))
// Add a second tablet to the topology.
tablet2 := &topodatapb.Tablet{
@@ -164,76 +160,73 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) {
Keyspace: "keyspace",
Shard: "shard",
}
- if err := ts.CreateTablet(context.Background(), tablet2); err != nil {
- t.Fatalf("CreateTablet failed: %v", err)
- }
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet2), "CreateTablet failed for %v", tablet2.Alias)
tw.loadTablets()
- // If refreshKnownTablets is disabled, only the new tablet is read
- // from the topo
- if refreshKnownTablets {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "AddTablet": 1})
- } else {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "AddTablet": 1})
- }
+ // Confirm second tablet triggers ListTablets + AddTablet calls.
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 1})
checkChecksum(t, tw, 2762153755)
- // Check the new tablet is returned by GetAllTablets().
- allTablets = fhc.GetAllTablets()
- key = TabletToMapKey(tablet2)
- if _, ok := allTablets[key]; !ok || len(allTablets) != 2 || !proto.Equal(allTablets[key], tablet2) {
- t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet2)
+ // Add a third tablet in a filtered keyspace to the topology.
+ tablet3 := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "aa",
+ Uid: 3,
+ },
+ Hostname: "host3",
+ PortMap: map[string]int32{
+ "vt": 789,
+ },
+ Keyspace: "excluded",
+ Shard: "shard",
}
-
- // Load the tablets again to show that when refreshKnownTablets is disabled,
- // only the list is read from the topo and the checksum doesn't change
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet3), "CreateTablet failed for %v", tablet3.Alias)
tw.loadTablets()
- if refreshKnownTablets {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2})
- } else {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1})
- }
- checkChecksum(t, tw, 2762153755)
+
+ // Confirm filtered tablet did not trigger an AddTablet call.
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 0})
+ checkChecksum(t, tw, 3177315266)
+
+ // Check the second tablet is returned by GetAllTablets(). This should not contain the filtered tablet.
+ allTablets = fhc.GetAllTablets()
+ key = TabletToMapKey(tablet2)
+ assert.Len(t, allTablets, 2)
+ assert.Contains(t, allTablets, key)
+ assert.True(t, proto.Equal(tablet2, allTablets[key]))
// same tablet, different port, should update (previous
// one should go away, new one be added)
//
// if refreshKnownTablets is disabled, this case is *not*
- // detected and the tablet remains in the topo using the
+ // detected and the tablet remains in the healthcheck using the
// old key
origTablet := proto.Clone(tablet).(*topodatapb.Tablet)
origKey := TabletToMapKey(tablet)
tablet.PortMap["vt"] = 456
- if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
+ _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
t.PortMap["vt"] = 456
return nil
- }); err != nil {
- t.Fatalf("UpdateTabletFields failed: %v", err)
- }
+ })
+ require.Nil(t, err, "UpdateTabletFields failed")
+
tw.loadTablets()
allTablets = fhc.GetAllTablets()
key = TabletToMapKey(tablet)
if refreshKnownTablets {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "ReplaceTablet": 1})
-
- if _, ok := allTablets[key]; !ok || len(allTablets) != 2 || !proto.Equal(allTablets[key], tablet) {
- t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet)
- }
- if _, ok := allTablets[origKey]; ok {
- t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, origKey)
- }
- checkChecksum(t, tw, 2762153755)
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 1})
+ assert.Len(t, allTablets, 2)
+ assert.Contains(t, allTablets, key)
+ assert.True(t, proto.Equal(tablet, allTablets[key]))
+ assert.NotContains(t, allTablets, origKey)
+ checkChecksum(t, tw, 3177315266)
} else {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1})
-
- if _, ok := allTablets[origKey]; !ok || len(allTablets) != 2 || !proto.Equal(allTablets[origKey], origTablet) {
- t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, origTablet)
- }
- if _, ok := allTablets[key]; ok {
- t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key)
- }
- checkChecksum(t, tw, 2762153755)
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 0})
+ assert.Len(t, allTablets, 2)
+ assert.Contains(t, allTablets, origKey)
+ assert.True(t, proto.Equal(origTablet, allTablets[origKey]))
+ assert.NotContains(t, allTablets, key)
+ checkChecksum(t, tw, 3177315266)
}
// Both tablets restart on different hosts.
@@ -243,94 +236,79 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) {
origTablet := proto.Clone(tablet).(*topodatapb.Tablet)
origTablet2 := proto.Clone(tablet2).(*topodatapb.Tablet)
- if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error {
+ _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error {
t.Hostname = tablet.Hostname
t.PortMap = tablet.PortMap
tablet2 = t
return nil
- }); err != nil {
- t.Fatalf("UpdateTabletFields failed: %v", err)
- }
- if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
+ })
+ require.Nil(t, err, "UpdateTabletFields failed")
+ _, err = ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
t.Hostname = "host3"
tablet = t
return nil
- }); err != nil {
- t.Fatalf("UpdateTabletFields failed: %v", err)
- }
+ })
+ require.Nil(t, err, "UpdateTabletFields failed")
tw.loadTablets()
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "ReplaceTablet": 2})
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 2})
allTablets = fhc.GetAllTablets()
key2 := TabletToMapKey(tablet2)
- if _, ok := allTablets[key2]; !ok {
- t.Fatalf("tablet was lost because it's reusing an address recently used by another tablet: %v", key2)
- }
+ assert.Contains(t, allTablets, key2, "tablet was lost because it's reusing an address recently used by another tablet: %v", key2)
// Change tablets back to avoid altering later tests.
- if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error {
+ _, err = ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error {
t.Hostname = origTablet2.Hostname
t.PortMap = origTablet2.PortMap
tablet2 = t
return nil
- }); err != nil {
- t.Fatalf("UpdateTabletFields failed: %v", err)
- }
- if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
+ })
+ require.Nil(t, err, "UpdateTabletFields failed")
+
+ _, err = ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
t.Hostname = origTablet.Hostname
tablet = t
return nil
- }); err != nil {
- t.Fatalf("UpdateTabletFields failed: %v", err)
- }
+ })
+ require.Nil(t, err, "UpdateTabletFields failed")
+
tw.loadTablets()
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "ReplaceTablet": 2})
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 2})
}
// Remove the tablet and check that it is detected as being gone.
- if err := ts.DeleteTablet(context.Background(), tablet.Alias); err != nil {
- t.Fatalf("DeleteTablet failed: %v", err)
- }
- if _, err := topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard"); err != nil {
- t.Fatalf("FixShardReplication failed: %v", err)
- }
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet.Alias))
+
+ _, err = topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard")
+ require.Nil(t, err, "FixShardReplication failed")
tw.loadTablets()
- if refreshKnownTablets {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "RemoveTablet": 1})
- } else {
- counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "RemoveTablet": 1})
- }
- checkChecksum(t, tw, 789108290)
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "RemoveTablet": 1})
+ checkChecksum(t, tw, 852159264)
allTablets = fhc.GetAllTablets()
+ assert.Len(t, allTablets, 1)
key = TabletToMapKey(tablet)
- if _, ok := allTablets[key]; ok || len(allTablets) != 1 {
- t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key)
- }
- key = TabletToMapKey(tablet2)
- if _, ok := allTablets[key]; !ok || len(allTablets) != 1 || !proto.Equal(allTablets[key], tablet2) {
- t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet2)
- }
+ assert.NotContains(t, allTablets, key)
- // Remove the other and check that it is detected as being gone.
- if err := ts.DeleteTablet(context.Background(), tablet2.Alias); err != nil {
- t.Fatalf("DeleteTablet failed: %v", err)
- }
- if _, err := topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard"); err != nil {
- t.Fatalf("FixShardReplication failed: %v", err)
- }
+ key = TabletToMapKey(tablet2)
+ assert.Contains(t, allTablets, key)
+ assert.True(t, proto.Equal(tablet2, allTablets[key]))
+
+	// Remove the other tablets and check that they are detected as being gone.
+ // Deleting the filtered tablet should not trigger a RemoveTablet call.
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet2.Alias))
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet3.Alias))
+ _, err = topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard")
+ require.Nil(t, err, "FixShardReplication failed")
tw.loadTablets()
checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "RemoveTablet": 1})
checkChecksum(t, tw, 0)
allTablets = fhc.GetAllTablets()
+ assert.Len(t, allTablets, 0)
key = TabletToMapKey(tablet)
- if _, ok := allTablets[key]; ok || len(allTablets) != 0 {
- t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key)
- }
+ assert.NotContains(t, allTablets, key)
key = TabletToMapKey(tablet2)
- if _, ok := allTablets[key]; ok || len(allTablets) != 0 {
- t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key)
- }
+ assert.NotContains(t, allTablets, key)
tw.Stop()
}
@@ -397,19 +375,13 @@ func TestFilterByShard(t *testing.T) {
for _, tc := range testcases {
fbs, err := NewFilterByShard(tc.filters)
- if err != nil {
- t.Errorf("cannot create FilterByShard for filters %v: %v", tc.filters, err)
- }
+ require.Nil(t, err, "cannot create FilterByShard for filters %v", tc.filters)
tablet := &topodatapb.Tablet{
Keyspace: tc.keyspace,
Shard: tc.shard,
}
-
- got := fbs.IsIncluded(tablet)
- if got != tc.included {
- t.Errorf("isIncluded(%v,%v) for filters %v returned %v but expected %v", tc.keyspace, tc.shard, tc.filters, got, tc.included)
- }
+ require.Equal(t, tc.included, fbs.IsIncluded(tablet))
}
}
@@ -436,7 +408,7 @@ func TestFilterByKeyspace(t *testing.T) {
hc := NewFakeHealthCheck(nil)
f := NewFilterByKeyspace(testKeyspacesToWatch)
ts := memorytopo.NewServer(testCell)
- tw := NewCellTabletsWatcher(context.Background(), ts, hc, f, testCell, 10*time.Minute, true, 5)
+ tw := NewTopologyWatcher(context.Background(), ts, hc, f, testCell, 10*time.Minute, true, 5)
for _, test := range testFilterByKeyspace {
// Add a new tablet to the topology.
@@ -454,22 +426,21 @@ func TestFilterByKeyspace(t *testing.T) {
Shard: testShard,
}
- got := f.IsIncluded(tablet)
- if got != test.expected {
- t.Errorf("isIncluded(%v) for keyspace %v returned %v but expected %v", test.keyspace, test.keyspace, got, test.expected)
- }
+ assert.Equal(t, test.expected, f.IsIncluded(tablet))
- if err := ts.CreateTablet(context.Background(), tablet); err != nil {
- t.Errorf("CreateTablet failed: %v", err)
- }
+ // Make this fatal because there is no point continuing if CreateTablet fails
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet))
tw.loadTablets()
key := TabletToMapKey(tablet)
allTablets := hc.GetAllTablets()
- if _, ok := allTablets[key]; ok != test.expected && proto.Equal(allTablets[key], tablet) != test.expected {
- t.Errorf("Error adding tablet - got %v; want %v", ok, test.expected)
+ if test.expected {
+ assert.Contains(t, allTablets, key)
+ } else {
+ assert.NotContains(t, allTablets, key)
}
+ assert.Equal(t, test.expected, proto.Equal(tablet, allTablets[key]))
// Replace the tablet we added above
tabletReplacement := &topodatapb.Tablet{
@@ -484,25 +455,160 @@ func TestFilterByKeyspace(t *testing.T) {
Keyspace: test.keyspace,
Shard: testShard,
}
- got = f.IsIncluded(tabletReplacement)
- if got != test.expected {
- t.Errorf("isIncluded(%v) for keyspace %v returned %v but expected %v", test.keyspace, test.keyspace, got, test.expected)
- }
- if err := ts.CreateTablet(context.Background(), tabletReplacement); err != nil {
- t.Errorf("CreateTablet failed: %v", err)
- }
+ assert.Equal(t, test.expected, f.IsIncluded(tabletReplacement))
+ require.NoError(t, ts.CreateTablet(context.Background(), tabletReplacement))
tw.loadTablets()
key = TabletToMapKey(tabletReplacement)
allTablets = hc.GetAllTablets()
- if _, ok := allTablets[key]; ok != test.expected && proto.Equal(allTablets[key], tabletReplacement) != test.expected {
- t.Errorf("Error replacing tablet - got %v; want %v", ok, test.expected)
+ if test.expected {
+ assert.Contains(t, allTablets, key)
+ } else {
+ assert.NotContains(t, allTablets, key)
}
+ assert.Equal(t, test.expected, proto.Equal(tabletReplacement, allTablets[key]))
// Delete the tablet
- if err := ts.DeleteTablet(context.Background(), tabletReplacement.Alias); err != nil {
- t.Fatalf("DeleteTablet failed: %v", err)
- }
+ require.NoError(t, ts.DeleteTablet(context.Background(), tabletReplacement.Alias))
+ }
+}
+
+// TestFilterByKeyspaceSkipsIgnoredTablets confirms a bug fix for the case when a TopologyWatcher
+// has a FilterByKeyspace TabletFilter configured along with refreshKnownTablets turned off. We want
+// to ensure that the TopologyWatcher:
+// - does not continuously call GetTablets for tablets that do not satisfy the filter
+// - does not add or remove these filtered out tablets from its healthcheck
+func TestFilterByKeyspaceSkipsIgnoredTablets(t *testing.T) {
+ ts := memorytopo.NewServer("aa")
+ fhc := NewFakeHealthCheck(nil)
+ topologyWatcherOperations.ZeroAll()
+ counts := topologyWatcherOperations.Counts()
+ f := NewFilterByKeyspace(testKeyspacesToWatch)
+ tw := NewTopologyWatcher(context.Background(), ts, fhc, f, "aa", 10*time.Minute, false /*refreshKnownTablets*/, 5)
+
+ counts = checkOpCounts(t, counts, map[string]int64{})
+ checkChecksum(t, tw, 0)
+
+ // Add a tablet from a tracked keyspace to the topology.
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "aa",
+ Uid: 0,
+ },
+ Hostname: "host1",
+ PortMap: map[string]int32{
+ "vt": 123,
+ },
+ Keyspace: "ks1",
+ Shard: "shard",
}
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet))
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 1})
+ checkChecksum(t, tw, 3238442862)
+
+ // Check tablet is reported by HealthCheck
+ allTablets := fhc.GetAllTablets()
+ key := TabletToMapKey(tablet)
+ assert.Contains(t, allTablets, key)
+ assert.True(t, proto.Equal(tablet, allTablets[key]))
+
+ // Add a second tablet to the topology that should get filtered out by the keyspace filter
+ tablet2 := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "aa",
+ Uid: 2,
+ },
+ Hostname: "host2",
+ PortMap: map[string]int32{
+ "vt": 789,
+ },
+ Keyspace: "ks3",
+ Shard: "shard",
+ }
+ require.NoError(t, ts.CreateTablet(context.Background(), tablet2))
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0})
+ checkChecksum(t, tw, 2762153755)
+
+ // Check the new tablet is NOT reported by HealthCheck.
+ allTablets = fhc.GetAllTablets()
+ assert.Len(t, allTablets, 1)
+ key = TabletToMapKey(tablet2)
+ assert.NotContains(t, allTablets, key)
+
+ // Load the tablets again to show that when refreshKnownTablets is disabled,
+ // only the list is read from the topo and the checksum doesn't change
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0})
+ checkChecksum(t, tw, 2762153755)
+
+ // With refreshKnownTablets set to false, changes to the port map for the same tablet alias
+ // should not be reflected in the HealthCheck state
+ _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error {
+ t.PortMap["vt"] = 456
+ return nil
+ })
+ require.NoError(t, err)
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0})
+ checkChecksum(t, tw, 2762153755)
+
+ allTablets = fhc.GetAllTablets()
+ assert.Len(t, allTablets, 1)
+ origKey := TabletToMapKey(tablet)
+ tabletWithNewPort := proto.Clone(tablet).(*topodatapb.Tablet)
+ tabletWithNewPort.PortMap["vt"] = 456
+ keyWithNewPort := TabletToMapKey(tabletWithNewPort)
+ assert.Contains(t, allTablets, origKey)
+ assert.NotContains(t, allTablets, keyWithNewPort)
+
+ // Remove the tracked tablet from the topo and check that it is detected as being gone.
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet.Alias))
+
+ tw.loadTablets()
+ counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "RemoveTablet": 1})
+ checkChecksum(t, tw, 789108290)
+ assert.Empty(t, fhc.GetAllTablets())
+
+ // Remove ignored tablet and check that we didn't try to remove it from the health check
+ require.NoError(t, ts.DeleteTablet(context.Background(), tablet2.Alias))
+
+ tw.loadTablets()
+ checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0})
+ checkChecksum(t, tw, 0)
+ assert.Empty(t, fhc.GetAllTablets())
+
+ tw.Stop()
+}
+
+func TestNewFilterByTabletTags(t *testing.T) {
+ // no required tags == true
+ filter := NewFilterByTabletTags(nil)
+ assert.True(t, filter.IsIncluded(&topodatapb.Tablet{}))
+
+ tags := map[string]string{
+ "instance_type": "i3.xlarge",
+ "some_key": "some_value",
+ }
+ filter = NewFilterByTabletTags(tags)
+
+ assert.False(t, filter.IsIncluded(&topodatapb.Tablet{
+ Tags: nil,
+ }))
+ assert.False(t, filter.IsIncluded(&topodatapb.Tablet{
+ Tags: map[string]string{},
+ }))
+ assert.False(t, filter.IsIncluded(&topodatapb.Tablet{
+ Tags: map[string]string{
+ "instance_type": "i3.xlarge",
+ },
+ }))
+ assert.True(t, filter.IsIncluded(&topodatapb.Tablet{
+ Tags: tags,
+ }))
}
diff --git a/go/vt/vtgr/external/golib/sqlutils/dialect.go b/go/vt/external/golib/sqlutils/dialect.go
similarity index 100%
rename from go/vt/vtgr/external/golib/sqlutils/dialect.go
rename to go/vt/external/golib/sqlutils/dialect.go
diff --git a/go/vt/vtgr/external/golib/sqlutils/sqlite_dialect.go b/go/vt/external/golib/sqlutils/sqlite_dialect.go
similarity index 100%
rename from go/vt/vtgr/external/golib/sqlutils/sqlite_dialect.go
rename to go/vt/external/golib/sqlutils/sqlite_dialect.go
diff --git a/go/vt/vtgr/external/golib/sqlutils/sqlite_dialect_test.go b/go/vt/external/golib/sqlutils/sqlite_dialect_test.go
similarity index 100%
rename from go/vt/vtgr/external/golib/sqlutils/sqlite_dialect_test.go
rename to go/vt/external/golib/sqlutils/sqlite_dialect_test.go
diff --git a/go/vt/vtgr/external/golib/sqlutils/sqlutils.go b/go/vt/external/golib/sqlutils/sqlutils.go
similarity index 96%
rename from go/vt/vtgr/external/golib/sqlutils/sqlutils.go
rename to go/vt/external/golib/sqlutils/sqlutils.go
index f89d96229a3..91e83f0a4e4 100644
--- a/go/vt/vtgr/external/golib/sqlutils/sqlutils.go
+++ b/go/vt/external/golib/sqlutils/sqlutils.go
@@ -38,7 +38,7 @@ const DateTimeFormat = "2006-01-02 15:04:05.999999"
// for easy, typed getters by column name.
type RowMap map[string]CellData
-// Cell data is the result of a single (atomic) column in a single row
+// CellData is the result of a single (atomic) column in a single row
type CellData sql.NullString
func (this *CellData) MarshalJSON() ([]byte, error) {
@@ -200,22 +200,22 @@ func GetDB(mysql_uri string) (*sql.DB, bool, error) {
return GetGenericDB("mysql", mysql_uri)
}
-// GetDB returns a SQLite DB instance based on DB file name.
+// GetSQLiteDB returns a SQLite DB instance based on DB file name.
// bool result indicates whether the DB was returned from cache; err
func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) {
- return GetGenericDB("sqlite3", dbFile)
+ return GetGenericDB("sqlite", dbFile)
}
// RowToArray is a convenience function, typically not called directly, which maps a
// single read database row into a NullString
-func RowToArray(rows *sql.Rows, columns []string) []CellData {
+func RowToArray(rows *sql.Rows, columns []string) ([]CellData, error) {
buff := make([]any, len(columns))
data := make([]CellData, len(columns))
for i := range buff {
buff[i] = data[i].NullString()
}
- rows.Scan(buff...)
- return data
+ err := rows.Scan(buff...)
+ return data, err
}
// ScanRowsToArrays is a convenience function, typically not called directly, which maps rows
@@ -223,8 +223,11 @@ func RowToArray(rows *sql.Rows, columns []string) []CellData {
func ScanRowsToArrays(rows *sql.Rows, on_row func([]CellData) error) error {
columns, _ := rows.Columns()
for rows.Next() {
- arr := RowToArray(rows, columns)
- err := on_row(arr)
+ arr, err := RowToArray(rows, columns)
+ if err != nil {
+ return err
+ }
+ err = on_row(arr)
if err != nil {
return err
}
diff --git a/go/vt/grpcclient/client.go b/go/vt/grpcclient/client.go
index 8ad995721da..0bbe8a3142b 100644
--- a/go/vt/grpcclient/client.go
+++ b/go/vt/grpcclient/client.go
@@ -21,6 +21,7 @@ package grpcclient
import (
"context"
"crypto/tls"
+ "sync"
"time"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
@@ -39,6 +40,7 @@ import (
)
var (
+ grpcDialOptionsMu sync.Mutex
keepaliveTime = 10 * time.Second
keepaliveTimeout = 10 * time.Second
initialConnWindowSize int
@@ -88,6 +90,8 @@ var grpcDialOptions []func(opts []grpc.DialOption) ([]grpc.DialOption, error)
// RegisterGRPCDialOptions registers an implementation of AuthServer.
func RegisterGRPCDialOptions(grpcDialOptionsFunc func(opts []grpc.DialOption) ([]grpc.DialOption, error)) {
+ grpcDialOptionsMu.Lock()
+ defer grpcDialOptionsMu.Unlock()
grpcDialOptions = append(grpcDialOptions, grpcDialOptionsFunc)
}
@@ -106,11 +110,12 @@ func Dial(target string, failFast FailFast, opts ...grpc.DialOption) (*grpc.Clie
// what that should be.
func DialContext(ctx context.Context, target string, failFast FailFast, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
grpccommon.EnableTracingOpt()
- msgSize := grpccommon.MaxMessageSize()
+ maxSendSize := grpccommon.MaxMessageSendSize()
+ maxRecvSize := grpccommon.MaxMessageRecvSize()
newopts := []grpc.DialOption{
grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(msgSize),
- grpc.MaxCallSendMsgSize(msgSize),
+ grpc.MaxCallRecvMsgSize(maxRecvSize),
+ grpc.MaxCallSendMsgSize(maxSendSize),
grpc.WaitForReady(bool(!failFast)),
),
}
@@ -137,12 +142,14 @@ func DialContext(ctx context.Context, target string, failFast FailFast, opts ...
newopts = append(newopts, opts...)
var err error
+ grpcDialOptionsMu.Lock()
for _, grpcDialOptionInitializer := range grpcDialOptions {
newopts, err = grpcDialOptionInitializer(newopts)
if err != nil {
log.Fatalf("There was an error initializing client grpc.DialOption: %v", err)
}
}
+ grpcDialOptionsMu.Unlock()
newopts = append(newopts, interceptors()...)
diff --git a/go/vt/grpcclient/client_auth_static.go b/go/vt/grpcclient/client_auth_static.go
index 22f69569956..bbb91a9fa55 100644
--- a/go/vt/grpcclient/client_auth_static.go
+++ b/go/vt/grpcclient/client_auth_static.go
@@ -20,24 +20,35 @@ import (
"context"
"encoding/json"
"os"
+ "os/signal"
+ "sync"
+ "syscall"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
+
+ "vitess.io/vitess/go/vt/servenv"
)
var (
credsFile string // registered as --grpc_auth_static_client_creds in RegisterFlags
// StaticAuthClientCreds implements client interface to be able to WithPerRPCCredentials
_ credentials.PerRPCCredentials = (*StaticAuthClientCreds)(nil)
+
+ clientCreds *StaticAuthClientCreds
+ clientCredsCancel context.CancelFunc
+ clientCredsErr error
+ clientCredsMu sync.Mutex
+ clientCredsSigChan chan os.Signal
)
-// StaticAuthClientCreds holder for client credentials
+// StaticAuthClientCreds holder for client credentials.
type StaticAuthClientCreds struct {
Username string
Password string
}
-// GetRequestMetadata gets the request metadata as a map from StaticAuthClientCreds
+// GetRequestMetadata gets the request metadata as a map from StaticAuthClientCreds.
func (c *StaticAuthClientCreds) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {
return map[string]string{
"username": c.Username,
@@ -47,30 +58,82 @@ func (c *StaticAuthClientCreds) GetRequestMetadata(context.Context, ...string) (
// RequireTransportSecurity indicates whether the credentials requires transport security.
// Given that people can use this with or without TLS, at the moment we are not enforcing
-// transport security
+// transport security.
func (c *StaticAuthClientCreds) RequireTransportSecurity() bool {
return false
}
// AppendStaticAuth optionally appends static auth credentials if provided.
func AppendStaticAuth(opts []grpc.DialOption) ([]grpc.DialOption, error) {
- if credsFile == "" {
- return opts, nil
- }
- data, err := os.ReadFile(credsFile)
+ creds, err := getStaticAuthCreds()
if err != nil {
return nil, err
}
- clientCreds := &StaticAuthClientCreds{}
- err = json.Unmarshal(data, clientCreds)
+ if creds != nil {
+ grpcCreds := grpc.WithPerRPCCredentials(creds)
+ opts = append(opts, grpcCreds)
+ }
+ return opts, nil
+}
+
+// ResetStaticAuth resets the static auth credentials.
+func ResetStaticAuth() {
+ clientCredsMu.Lock()
+ defer clientCredsMu.Unlock()
+ if clientCredsCancel != nil {
+ clientCredsCancel()
+ clientCredsCancel = nil
+ }
+ clientCreds = nil
+ clientCredsErr = nil
+}
+
+// getStaticAuthCreds returns the static auth creds and error.
+func getStaticAuthCreds() (*StaticAuthClientCreds, error) {
+ clientCredsMu.Lock()
+ defer clientCredsMu.Unlock()
+ if credsFile != "" && clientCreds == nil {
+ var ctx context.Context
+ ctx, clientCredsCancel = context.WithCancel(context.Background())
+ go handleClientCredsSignals(ctx)
+ clientCreds, clientCredsErr = loadStaticAuthCredsFromFile(credsFile)
+ }
+ return clientCreds, clientCredsErr
+}
+
+// handleClientCredsSignals handles signals to reload client creds.
+func handleClientCredsSignals(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-clientCredsSigChan:
+ if newCreds, err := loadStaticAuthCredsFromFile(credsFile); err == nil {
+ clientCredsMu.Lock()
+ clientCreds = newCreds
+ clientCredsErr = err
+ clientCredsMu.Unlock()
+ }
+ }
+ }
+}
+
+// loadStaticAuthCredsFromFile loads static auth credentials from a file.
+func loadStaticAuthCredsFromFile(path string) (*StaticAuthClientCreds, error) {
+ data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
- creds := grpc.WithPerRPCCredentials(clientCreds)
- opts = append(opts, creds)
- return opts, nil
+ creds := &StaticAuthClientCreds{}
+ err = json.Unmarshal(data, creds)
+ return creds, err
}
func init() {
+ servenv.OnInit(func() {
+ clientCredsSigChan = make(chan os.Signal, 1)
+ signal.Notify(clientCredsSigChan, syscall.SIGHUP)
+ _, _ = getStaticAuthCreds() // preload static auth credentials
+ })
RegisterGRPCDialOptions(AppendStaticAuth)
}
diff --git a/go/vt/grpcclient/client_auth_static_test.go b/go/vt/grpcclient/client_auth_static_test.go
new file mode 100644
index 00000000000..e14ace527d1
--- /dev/null
+++ b/go/vt/grpcclient/client_auth_static_test.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package grpcclient
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "google.golang.org/grpc"
+)
+
+func TestAppendStaticAuth(t *testing.T) {
+ {
+ clientCreds = nil
+ clientCredsErr = nil
+ opts, err := AppendStaticAuth([]grpc.DialOption{})
+ assert.Nil(t, err)
+ assert.Len(t, opts, 0)
+ }
+ {
+ clientCreds = nil
+ clientCredsErr = errors.New("test err")
+ opts, err := AppendStaticAuth([]grpc.DialOption{})
+ assert.NotNil(t, err)
+ assert.Len(t, opts, 0)
+ }
+ {
+ clientCreds = &StaticAuthClientCreds{Username: "test", Password: "123456"}
+ clientCredsErr = nil
+ opts, err := AppendStaticAuth([]grpc.DialOption{})
+ assert.Nil(t, err)
+ assert.Len(t, opts, 1)
+ }
+}
+
+func TestGetStaticAuthCreds(t *testing.T) {
+ tmp, err := os.CreateTemp("", t.Name())
+ assert.Nil(t, err)
+ defer os.Remove(tmp.Name())
+ credsFile = tmp.Name()
+ clientCredsSigChan = make(chan os.Signal, 1)
+
+ // load old creds
+ fmt.Fprint(tmp, `{"Username": "old", "Password": "123456"}`)
+ ResetStaticAuth()
+ creds, err := getStaticAuthCreds()
+ assert.Nil(t, err)
+ assert.Equal(t, &StaticAuthClientCreds{Username: "old", Password: "123456"}, creds)
+
+ // write new creds to the same file
+ _ = tmp.Truncate(0)
+ _, _ = tmp.Seek(0, 0)
+ fmt.Fprint(tmp, `{"Username": "new", "Password": "123456789"}`)
+
+ // test the creds did not change yet
+ creds, err = getStaticAuthCreds()
+ assert.Nil(t, err)
+ assert.Equal(t, &StaticAuthClientCreds{Username: "old", Password: "123456"}, creds)
+
+ // test SIGHUP signal triggers reload
+ credsOld := creds
+ clientCredsSigChan <- syscall.SIGHUP
+ timeoutChan := time.After(time.Second * 10)
+ for {
+ select {
+ case <-timeoutChan:
+ assert.Fail(t, "timed out waiting for SIGHUP reload of static auth creds")
+ return
+ default:
+ // confirm new creds get loaded
+ creds, err = getStaticAuthCreds()
+ if reflect.DeepEqual(creds, credsOld) {
+ continue // not changed yet
+ }
+ assert.Nil(t, err)
+ assert.Equal(t, &StaticAuthClientCreds{Username: "new", Password: "123456789"}, creds)
+ return
+ }
+ }
+}
+
+func TestLoadStaticAuthCredsFromFile(t *testing.T) {
+ {
+ f, err := os.CreateTemp("", t.Name())
+ if !assert.Nil(t, err) {
+ assert.FailNowf(t, "cannot create temp file: %s", err.Error())
+ }
+ defer os.Remove(f.Name())
+ fmt.Fprint(f, `{
+ "Username": "test",
+ "Password": "correct horse battery staple"
+ }`)
+ if !assert.Nil(t, err) {
+ assert.FailNowf(t, "cannot read auth file: %s", err.Error())
+ }
+
+ creds, err := loadStaticAuthCredsFromFile(f.Name())
+ assert.Nil(t, err)
+ assert.Equal(t, "test", creds.Username)
+ assert.Equal(t, "correct horse battery staple", creds.Password)
+ }
+ {
+ _, err := loadStaticAuthCredsFromFile(`does-not-exist`)
+ assert.NotNil(t, err)
+ }
+}
diff --git a/go/vt/grpcclient/client_flaky_test.go b/go/vt/grpcclient/client_flaky_test.go
index c6baad962de..edc6d9be98c 100644
--- a/go/vt/grpcclient/client_flaky_test.go
+++ b/go/vt/grpcclient/client_flaky_test.go
@@ -43,7 +43,7 @@ func TestDialErrors(t *testing.T) {
t.Fatal(err)
}
vtg := vtgateservicepb.NewVitessClient(gconn)
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{})
cancel()
gconn.Close()
diff --git a/go/vt/grpccommon/options.go b/go/vt/grpccommon/options.go
index ae9bea0172d..9d8e348ccb8 100644
--- a/go/vt/grpccommon/options.go
+++ b/go/vt/grpccommon/options.go
@@ -30,6 +30,10 @@ var (
// accept. Larger messages will be rejected.
// Note: We're using 16 MiB as default value because that's the default in MySQL
maxMessageSize = 16 * 1024 * 1024
+ // These options override maxMessageSize if > 0, allowing us to control the
+ // max send size independently from the max receive size.
+ maxMsgRecvSize = 0
+ maxMsgSendSize = 0
// enableTracing sets a flag to enable grpc client/server tracing.
enableTracing bool
// enablePrometheus sets a flag to enable grpc client/server grpc monitoring.
@@ -43,6 +47,8 @@ var (
// command-line arguments.
func RegisterFlags(fs *pflag.FlagSet) {
fs.IntVar(&maxMessageSize, "grpc_max_message_size", maxMessageSize, "Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'.")
+ fs.IntVar(&maxMsgSendSize, "grpc_max_message_send_size", maxMsgSendSize, "Maximum allowed RPC message size when sending. If 0, defaults to grpc_max_message_size.")
+ fs.IntVar(&maxMsgRecvSize, "grpc_max_message_recv_size", maxMsgRecvSize, "Maximum allowed RPC message size when receiving. If 0, defaults to grpc_max_message_size.")
fs.BoolVar(&enableTracing, "grpc_enable_tracing", enableTracing, "Enable gRPC tracing.")
fs.BoolVar(&enablePrometheus, "grpc_prometheus", enablePrometheus, "Enable gRPC monitoring with Prometheus.")
}
@@ -70,6 +76,20 @@ func MaxMessageSize() int {
return maxMessageSize
}
+func MaxMessageRecvSize() int {
+ if maxMsgRecvSize > 0 {
+ return maxMsgRecvSize
+ }
+ return MaxMessageSize()
+}
+
+func MaxMessageSendSize() int {
+ if maxMsgSendSize > 0 {
+ return maxMsgSendSize
+ }
+ return MaxMessageSize()
+}
+
func init() {
stats.NewString("GrpcVersion").Set(grpc.Version)
}
diff --git a/go/vt/logutil/planetscale_logger.go b/go/vt/logutil/planetscale_logger.go
new file mode 100644
index 00000000000..7133188df4f
--- /dev/null
+++ b/go/vt/logutil/planetscale_logger.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logutil
+
+import (
+ pslog "github.com/planetscale/log"
+ noglog "github.com/slok/noglog"
+
+ "vitess.io/vitess/go/vt/log"
+)
+
+type PSLogger pslog.SugaredLogger
+
+// SetPlanetScaleLogger in-place noglog replacement with PlanetScale's logger.
+func SetPlanetScaleLogger(conf *pslog.Config) (psLogger *pslog.SugaredLogger, err error) {
+ // Use the passed configuration instead of the default configuration
+ if conf != nil {
+ configLogger, err := conf.Build()
+ if err != nil {
+ return nil, err
+ }
+ psLogger = configLogger.Sugar()
+ } else {
+ psLogger = pslog.NewPlanetScaleSugarLogger()
+ }
+
+ noglog.SetLogger(&noglog.LoggerFunc{
+ DebugfFunc: func(f string, a ...interface{}) { psLogger.Debugf(f, a...) },
+ InfofFunc: func(f string, a ...interface{}) { psLogger.Infof(f, a...) },
+ WarnfFunc: func(f string, a ...interface{}) { psLogger.Warnf(f, a...) },
+ ErrorfFunc: func(f string, a ...interface{}) { psLogger.Errorf(f, a...) },
+ })
+
+ log.Flush = noglog.Flush
+ log.Info = noglog.Info
+ log.Infof = noglog.Infof
+ log.InfoDepth = noglog.InfoDepth
+ log.Warning = noglog.Warning
+ log.Warningf = noglog.Warningf
+ log.WarningDepth = noglog.WarningDepth
+ log.Error = noglog.Error
+ log.Errorf = noglog.Errorf
+ log.ErrorDepth = noglog.ErrorDepth
+ log.Exit = noglog.Exit
+ log.Exitf = noglog.Exitf
+ log.ExitDepth = noglog.ExitDepth
+ log.Fatal = noglog.Fatal
+ log.Fatalf = noglog.Fatalf
+ log.FatalDepth = noglog.FatalDepth
+
+ return
+}
diff --git a/go/vt/logutil/planetscale_logger_test.go b/go/vt/logutil/planetscale_logger_test.go
new file mode 100644
index 00000000000..9433a0ddca0
--- /dev/null
+++ b/go/vt/logutil/planetscale_logger_test.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ pslog "github.com/planetscale/log"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ vtlog "vitess.io/vitess/go/vt/log"
+)
+
+// MemorySink implements zap.Sink by writing all messages to a buffer.
+// It's used to capture the logs.
+type MemorySink struct {
+ *bytes.Buffer
+}
+
+// Implement Close and Sync as no-ops to satisfy the interface. The Write
+// method is provided by the embedded buffer.
+func (s *MemorySink) Close() error { return nil }
+func (s *MemorySink) Sync() error { return nil }
+
+func SetupLoggerWithMemSink() (sink *MemorySink, err error) {
+ // Create a sink instance, and register it with zap for the "memory"
+ // protocol.
+ sink = &MemorySink{new(bytes.Buffer)}
+ err = zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
+ return sink, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ testLoggerConf := pslog.NewPlanetScaleConfig(pslog.DetectEncoding(), pslog.InfoLevel)
+ testLoggerConf.OutputPaths = []string{"memory://"}
+ testLoggerConf.ErrorOutputPaths = []string{"memory://"}
+ _, err = SetPlanetScaleLogger(&testLoggerConf)
+ if err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+func TestPSLogger_Replacing_glog(t *testing.T) {
+ type logMsg struct {
+ Level string `json:"level"`
+ Msg string `json:"msg"`
+ }
+
+ type testCase struct {
+ name string
+ logLevel zapcore.Level
+ }
+
+ dummyLogMessage := "testing log"
+ testCases := []testCase{
+ {"log info", pslog.InfoLevel},
+ {"log warn", pslog.WarnLevel},
+ {"log error", pslog.ErrorLevel},
+ }
+
+ sink, err := SetupLoggerWithMemSink()
+ assert.NoError(t, err)
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var loggingFunc func(format string, args ...interface{})
+ var expectedLevel string
+
+ switch tc.logLevel {
+ case zapcore.InfoLevel:
+ loggingFunc = vtlog.Infof
+ expectedLevel = "info"
+ case zapcore.ErrorLevel:
+ loggingFunc = vtlog.Errorf
+ expectedLevel = "error"
+ case zapcore.WarnLevel:
+ loggingFunc = vtlog.Warningf
+ expectedLevel = "warn"
+ }
+
+ loggingFunc(dummyLogMessage)
+
+ // Unmarshal the captured log. This means we're getting a struct log.
+ actualLog := logMsg{}
+ err = json.Unmarshal(sink.Bytes(), &actualLog)
+ assert.NoError(t, err)
+ // Reset the sink so that it'll contain one log per test case.
+ sink.Reset()
+
+ assert.Equal(t, expectedLevel, actualLog.Level)
+ assert.Equal(t, dummyLogMessage, actualLog.Msg)
+
+ })
+ }
+}
diff --git a/go/vt/logutil/purge.go b/go/vt/logutil/purge.go
index 20f028d7187..8d85e15c5c9 100644
--- a/go/vt/logutil/purge.go
+++ b/go/vt/logutil/purge.go
@@ -127,7 +127,10 @@ func PurgeLogs() {
logDir := f.Value.String()
program := filepath.Base(os.Args[0])
ticker := time.NewTicker(purgeLogsInterval)
- for range ticker.C {
- purgeLogsOnce(time.Now(), logDir, program, keepLogsByCtime, keepLogsByMtime)
- }
+
+ go func() {
+ for range ticker.C {
+ purgeLogsOnce(time.Now(), logDir, program, keepLogsByCtime, keepLogsByMtime)
+ }
+ }()
}
diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go
index beddc33333c..660abcc5008 100644
--- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go
+++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go
@@ -264,7 +264,7 @@ func (bh *AZBlobBackupHandle) ReadFile(ctx context.Context, filename string) (io
}
blobURL := containerURL.NewBlobURL(obj)
- resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
+ resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
if err != nil {
return nil, err
}
diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go
index 63868bf48ac..14592cf8fc2 100644
--- a/go/vt/mysqlctl/backup.go
+++ b/go/vt/mysqlctl/backup.go
@@ -17,16 +17,18 @@ limitations under the License.
package mysqlctl
import (
+ "bufio"
"errors"
"fmt"
+ "io"
"os"
"path/filepath"
- "strconv"
"strings"
"time"
"github.com/spf13/pflag"
+ "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"context"
@@ -94,16 +96,19 @@ var (
backupDuration = stats.NewGauge("backup_duration_seconds", "How long it took to complete the last backup operation (in seconds)")
restoreDuration = stats.NewGauge("restore_duration_seconds", "How long it took to complete the last restore operation (in seconds)")
+
+ EmptyBackupMessage = "no new data to backup, skipping it"
)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtbackup", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtbackup", "vtctld"} {
servenv.OnParseFor(cmd, registerBackupFlags)
}
}
func registerBackupFlags(fs *pflag.FlagSet) {
fs.StringVar(&backupStorageHook, "backup_storage_hook", backupStorageHook, "if set, we send the contents of the backup files through this hook.")
+ _ = fs.MarkDeprecated("backup_storage_hook", "consider using one of the builtin compression algorithms or --external-compressor and --external-decompressor instead.")
fs.BoolVar(&backupStorageCompress, "backup_storage_compress", backupStorageCompress, "if set, the backup files will be compressed (default is true). Set to false for instance if a backup_storage_hook is specified and it compresses the data.")
fs.IntVar(&backupCompressBlockSize, "backup_storage_block_size", backupCompressBlockSize, "if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000).")
fs.IntVar(&backupCompressBlocks, "backup_storage_number_blocks", backupCompressBlocks, "if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression.")
@@ -128,11 +133,13 @@ func Backup(ctx context.Context, params BackupParams) error {
return vterrors.Wrap(err, "StartBackup failed")
}
- be, err := GetBackupEngine()
+ be, err := GetBackupEngine(params.BackupEngine)
if err != nil {
return vterrors.Wrap(err, "failed to find backup engine")
}
+ params.Logger.Infof("Using backup engine %q", be.Name())
+
// Take the backup, and either AbortBackup or EndBackup.
usable, err := be.ExecuteBackup(ctx, params, bh)
logger := params.Logger
@@ -337,31 +344,18 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error)
return nil, err
}
- // mysqld needs to be running in order for mysql_upgrade to work.
- // If we've just restored from a backup from previous MySQL version then mysqld
- // may fail to start due to a different structure of mysql.* tables. The flag
- // --skip-grant-tables ensures that these tables are not read until mysql_upgrade
- // is executed. And since with --skip-grant-tables anyone can connect to MySQL
- // without password, we are passing --skip-networking to greatly reduce the set
- // of those who can connect.
- params.Logger.Infof("Restore: starting mysqld for mysql_upgrade")
- // Note Start will use dba user for waiting, this is fine, it will be allowed.
- err = params.Mysqld.Start(context.Background(), params.Cnf, "--skip-grant-tables", "--skip-networking")
- if err != nil {
- return nil, err
- }
-
- // We disable super_read_only, in case it is in the default MySQL startup
- // parameters and will be blocking the writes we need to do in
- // PopulateMetadataTables(). We do it blindly, since
- // this will fail on MariaDB, which doesn't have super_read_only
- // This is safe, since we're restarting MySQL after the restore anyway
- params.Logger.Infof("Restore: disabling super_read_only")
- if err := params.Mysqld.SetSuperReadOnly(false); err != nil {
- if strings.Contains(err.Error(), strconv.Itoa(mysql.ERUnknownSystemVariable)) {
- params.Logger.Warningf("Restore: server does not know about super_read_only, continuing anyway...")
- } else {
- params.Logger.Errorf("Restore: unexpected error while trying to set super_read_only: %v", err)
+ if re.ShouldStartMySQLAfterRestore() {
+ // mysqld needs to be running in order for mysql_upgrade to work.
+ // If we've just restored from a backup from previous MySQL version then mysqld
+ // may fail to start due to a different structure of mysql.* tables. The flag
+ // --skip-grant-tables ensures that these tables are not read until mysql_upgrade
+ // is executed. And since with --skip-grant-tables anyone can connect to MySQL
+ // without password, we are passing --skip-networking to greatly reduce the set
+ // of those who can connect.
+ params.Logger.Infof("Restore: starting mysqld for mysql_upgrade")
+ // Note Start will use dba user for waiting, this is fine, it will be allowed.
+ err = params.Mysqld.Start(context.Background(), params.Cnf, "--skip-grant-tables", "--skip-networking")
+ if err != nil {
return nil, err
}
}
@@ -402,3 +396,24 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error)
restoreDuration.Set(int64(time.Since(startTs).Seconds()))
return manifest, nil
}
+
+// scanLinesToLogger scans full lines from the given Reader and sends them to
+// the given Logger until EOF.
+func scanLinesToLogger(prefix string, reader io.Reader, logger logutil.Logger, doneFunc func()) {
+ defer doneFunc()
+
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ line := scanner.Text()
+ logger.Infof("%s: %s", prefix, line)
+ }
+ if err := scanner.Err(); err != nil {
+ // This is usually run in a background goroutine, so there's no point
+ // returning an error. Just log it.
+ logger.Warningf("error scanning lines from %s: %v", prefix, err)
+ }
+}
+
+func FormatRFC3339(t time.Time) string {
+ return t.Format(time.RFC3339)
+}
diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go
index 16db1a72f8a..d68af0e044a 100644
--- a/go/vt/mysqlctl/backup_test.go
+++ b/go/vt/mysqlctl/backup_test.go
@@ -17,27 +17,24 @@ limitations under the License.
package mysqlctl
import (
+ "fmt"
+ "io"
"os"
"path"
"reflect"
"sort"
+ "sync"
"testing"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/vt/logutil"
)
-func TestFindFilesToBackup(t *testing.T) {
+func TestFindFilesToBackupWithoutRedoLog(t *testing.T) {
root := t.TempDir()
- // get the flavor and version to deal with any behavioral differences
- versionStr, err := GetVersionString()
- require.NoError(t, err)
- flavor, version, err := ParseVersionString(versionStr)
- require.NoError(t, err)
- features := newCapabilitySet(flavor, version)
-
// Initialize the fake mysql root directories
innodbDataDir := path.Join(root, "innodb_data")
innodbLogDir := path.Join(root, "innodb_log")
@@ -54,10 +51,6 @@ func TestFindFilesToBackup(t *testing.T) {
}
innodbLogFile := "innodb_log_1"
- if features.hasDynamicRedoLogCapacity() {
- os.Mkdir(path.Join(innodbLogDir, mysql.DynamicRedoLogSubdir), os.ModePerm)
- innodbLogFile = path.Join(mysql.DynamicRedoLogSubdir, "#ib_redo1")
- }
if err := os.WriteFile(path.Join(innodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm); err != nil {
t.Fatalf("failed to write file innodb_data_1: %v", err)
@@ -130,8 +123,123 @@ func TestFindFilesToBackup(t *testing.T) {
}
}
+func TestFindFilesToBackupWithRedoLog(t *testing.T) {
+ root := t.TempDir()
+
+ // Initialize the fake mysql root directories
+ innodbDataDir := path.Join(root, "innodb_data")
+ innodbLogDir := path.Join(root, "innodb_log")
+ dataDir := path.Join(root, "data")
+ dataDbDir := path.Join(dataDir, "vt_db")
+ extraDir := path.Join(dataDir, "extra_dir")
+ outsideDbDir := path.Join(root, "outside_db")
+ rocksdbDir := path.Join(dataDir, ".rocksdb")
+ sdiOnlyDir := path.Join(dataDir, "sdi_dir")
+ for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir, rocksdbDir, sdiOnlyDir} {
+ if err := os.MkdirAll(s, os.ModePerm); err != nil {
+ t.Fatalf("failed to create directory %v: %v", s, err)
+ }
+ }
+
+ cnf := &Mycnf{
+ InnodbDataHomeDir: innodbDataDir,
+ InnodbLogGroupHomeDir: innodbLogDir,
+ DataDir: dataDir,
+ }
+
+ os.Mkdir(path.Join(innodbLogDir, mysql.DynamicRedoLogSubdir), os.ModePerm)
+ innodbLogFile := path.Join(mysql.DynamicRedoLogSubdir, "#ib_redo1")
+
+ if err := os.WriteFile(path.Join(innodbDataDir, "innodb_data_1"), []byte("innodb data 1 contents"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file innodb_data_1: %v", err)
+ }
+ if err := os.WriteFile(path.Join(innodbLogDir, innodbLogFile), []byte("innodb log 1 contents"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file %s: %v", innodbLogFile, err)
+ }
+ if err := os.WriteFile(path.Join(dataDbDir, "db.opt"), []byte("db opt file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file db.opt: %v", err)
+ }
+ if err := os.WriteFile(path.Join(extraDir, "extra.stuff"), []byte("extra file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file extra.stuff: %v", err)
+ }
+ if err := os.WriteFile(path.Join(outsideDbDir, "table1.frm"), []byte("frm file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file table1.frm: %v", err)
+ }
+ if err := os.Symlink(outsideDbDir, path.Join(dataDir, "vt_symlink")); err != nil {
+ t.Fatalf("failed to symlink vt_symlink: %v", err)
+ }
+ if err := os.WriteFile(path.Join(rocksdbDir, "000011.sst"), []byte("rocksdb file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file 000011.sst: %v", err)
+ }
+ if err := os.WriteFile(path.Join(sdiOnlyDir, "table1.sdi"), []byte("sdi file"), os.ModePerm); err != nil {
+ t.Fatalf("failed to write file table1.sdi: %v", err)
+ }
+
+ result, totalSize, err := findFilesToBackup(cnf)
+ if err != nil {
+ t.Fatalf("findFilesToBackup failed: %v", err)
+ }
+ sort.Sort(forTest(result))
+ t.Logf("findFilesToBackup returned: %v", result)
+ expected := []FileEntry{
+ {
+ Base: "Data",
+ Name: ".rocksdb/000011.sst",
+ },
+ {
+ Base: "Data",
+ Name: "sdi_dir/table1.sdi",
+ },
+ {
+ Base: "Data",
+ Name: "vt_db/db.opt",
+ },
+ {
+ Base: "Data",
+ Name: "vt_symlink/table1.frm",
+ },
+ {
+ Base: "InnoDBData",
+ Name: "innodb_data_1",
+ },
+ {
+ Base: "InnoDBLog",
+ Name: innodbLogFile,
+ },
+ }
+ if !reflect.DeepEqual(result, expected) {
+ t.Fatalf("got wrong list of FileEntry %v, expected %v", result, expected)
+ }
+ if totalSize <= 0 {
+ t.Fatalf("backup size should be > 0, got %v", totalSize)
+ }
+}
+
type forTest []FileEntry
func (f forTest) Len() int { return len(f) }
func (f forTest) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f forTest) Less(i, j int) bool { return f[i].Base+f[i].Name < f[j].Base+f[j].Name }
+
+func TestScanLinesToLogger(t *testing.T) {
+ reader, writer := io.Pipe()
+ logger := logutil.NewMemoryLogger()
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go scanLinesToLogger("test", reader, logger, wg.Done)
+
+ for i := range [100]int{} {
+ _, err := writer.Write([]byte(fmt.Sprintf("foobar %d\n", i)))
+ require.NoError(t, err)
+ }
+
+ writer.Close()
+ wg.Wait()
+
+ require.Equal(t, 100, len(logger.Events))
+
+ for i, event := range logger.Events {
+ require.Equal(t, fmt.Sprintf("test: foobar %d", i), event.Value)
+ }
+}
diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go
index dceb34e3d40..d7f6251c2ae 100644
--- a/go/vt/mysqlctl/backupengine.go
+++ b/go/vt/mysqlctl/backupengine.go
@@ -46,6 +46,7 @@ var (
type BackupEngine interface {
ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error)
ShouldDrainForBackup() bool
+ Name() string
}
// BackupParams is the struct that holds all params passed to ExecuteBackup
@@ -67,6 +68,24 @@ type BackupParams struct {
TabletAlias string
// BackupTime is the time at which the backup is being started
BackupTime time.Time
+ // BackupEngine allows us to override which backup engine should be used for a request
+ BackupEngine string
+}
+
+func (b *BackupParams) Copy() BackupParams {
+ return BackupParams{
+ Cnf: b.Cnf,
+ Mysqld: b.Mysqld,
+ Logger: b.Logger,
+ Concurrency: b.Concurrency,
+ HookExtraEnv: b.HookExtraEnv,
+ TopoServer: b.TopoServer,
+ Keyspace: b.Keyspace,
+ Shard: b.Shard,
+ TabletAlias: b.TabletAlias,
+ BackupTime: b.BackupTime,
+ BackupEngine: b.BackupEngine,
+ }
}
// RestoreParams is the struct that holds all params passed to ExecuteRestore
@@ -93,12 +112,31 @@ type RestoreParams struct {
// StartTime: if non-zero, look for a backup that was taken at or before this time
// Otherwise, find the most recent backup
StartTime time.Time
+ // AllowedBackupEngines if present will filter out any backups taken with engines not included in the list
+ AllowedBackupEngines []string
+}
+
+func (p *RestoreParams) Copy() RestoreParams {
+ return RestoreParams{
+ Cnf: p.Cnf,
+ Mysqld: p.Mysqld,
+ Logger: p.Logger,
+ Concurrency: p.Concurrency,
+ HookExtraEnv: p.HookExtraEnv,
+ DeleteBeforeRestore: p.DeleteBeforeRestore,
+ DbName: p.DbName,
+ Keyspace: p.Keyspace,
+ Shard: p.Shard,
+ StartTime: p.StartTime,
+ AllowedBackupEngines: p.AllowedBackupEngines,
+ }
}
// RestoreEngine is the interface to restore a backup with a given engine.
// Returns the manifest of a backup if successful, otherwise returns an error
type RestoreEngine interface {
ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error)
+ ShouldStartMySQLAfterRestore() bool
}
// BackupRestoreEngine is a combination of BackupEngine and RestoreEngine.
@@ -112,7 +150,7 @@ type BackupRestoreEngine interface {
var BackupRestoreEngineMap = make(map[string]BackupRestoreEngine)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain", "vtbackup"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtbackup"} {
servenv.OnParseFor(cmd, registerBackupEngineFlags)
}
}
@@ -128,8 +166,12 @@ func registerBackupEngineFlags(fs *pflag.FlagSet) {
// a particular backup by calling GetRestoreEngine().
//
// This must only be called after flags have been parsed.
-func GetBackupEngine() (BackupEngine, error) {
+func GetBackupEngine(backupEngine string) (BackupEngine, error) {
name := backupEngineImplementation
+ if backupEngine != "" {
+ name = backupEngine
+ }
+
be, ok := BackupRestoreEngineMap[name]
if !ok {
return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "unknown BackupEngine implementation %q", name)
@@ -220,6 +262,12 @@ func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backup
continue
}
+ // if allowed backup engine is not empty, we only try to restore from backups taken with the specified backup engines
+ if len(params.AllowedBackupEngines) > 0 && !sliceContains(params.AllowedBackupEngines, bm.BackupMethod) {
+ params.Logger.Warningf("Ignoring backup %v because it is using %q backup engine", bh.Name(), bm.BackupMethod)
+ continue
+ }
+
var backupTime time.Time
if checkBackupTime {
backupTime, err = time.Parse(time.RFC3339, bm.BackupTime)
@@ -390,31 +438,24 @@ func addMySQL8DataDictionary(fes []FileEntry, base string, baseDir string) ([]Fi
return fes, fi.Size(), nil
}
+func hasDynamicRedoLog(cnf *Mycnf) bool {
+ dynamicRedoLogPath := path.Join(cnf.InnodbLogGroupHomeDir, mysql.DynamicRedoLogSubdir)
+ info, err := os.Stat(dynamicRedoLogPath)
+ return err == nil && info.IsDir()
+}
+
func findFilesToBackup(cnf *Mycnf) ([]FileEntry, int64, error) {
var err error
var result []FileEntry
var size, totalSize int64
- var flavor MySQLFlavor
- var version ServerVersion
- var features capabilitySet
-
- // get the flavor and version to deal with any behavioral differences
- versionStr, err := GetVersionString()
- if err != nil {
- return nil, 0, err
- }
- flavor, version, err = ParseVersionString(versionStr)
- if err != nil {
- return nil, 0, err
- }
- features = newCapabilitySet(flavor, version)
// first add innodb files
result, totalSize, err = addDirectory(result, backupInnodbDataHomeDir, cnf.InnodbDataHomeDir, "")
if err != nil {
return nil, 0, err
}
- if features.hasDynamicRedoLogCapacity() {
+
+ if hasDynamicRedoLog(cnf) {
result, size, err = addDirectory(result, backupInnodbLogGroupHomeDir, cnf.InnodbLogGroupHomeDir, mysql.DynamicRedoLogSubdir)
} else {
result, size, err = addDirectory(result, backupInnodbLogGroupHomeDir, cnf.InnodbLogGroupHomeDir, "")
diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go
index 09323c9387b..aad5bbdfca1 100644
--- a/go/vt/mysqlctl/builtinbackupengine.go
+++ b/go/vt/mysqlctl/builtinbackupengine.go
@@ -113,7 +113,7 @@ type FileEntry struct {
}
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
servenv.OnParseFor(cmd, registerBuiltinBackupEngineFlags)
}
}
@@ -160,9 +160,10 @@ func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) {
// ExecuteBackup returns a boolean that indicates if the backup is usable,
// and an overall error.
func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) {
-
params.Logger.Infof("Hook: %v, Compress: %v", backupStorageHook, backupStorageCompress)
-
+ if backupStorageHook != "" {
+ log.Warning("Flag --backup_storage_hook has been deprecated, consider using one of the builtin compression algorithms or --external-compressor and --external-decompressor instead.")
+ }
// Save initial state so we can restore.
replicaStartRequired := false
sourceIsPrimary := false
@@ -300,7 +301,6 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP
// backupFiles finds the list of files to backup, and creates the backup.
func (be *BuiltinBackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, replicationPosition mysql.Position) (finalErr error) {
-
// Get the files to backup.
// We don't care about totalSize because we add each file separately.
fes, _, err := findFilesToBackup(params.Cnf)
@@ -591,6 +591,9 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor
if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil {
return nil, err
}
+ if bm.TransformHook != "" {
+ log.Warning("Flag --backup_storage_hook has been deprecated, consider using one of the builtin compression algorithms or --external-compressor and --external-decompressor instead.")
+ }
params.Logger.Infof("Restore: copying %v files", len(bm.FileEntries))
@@ -606,6 +609,16 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor
// restoreFiles will copy all the files from the BackupStorage to the
// right place.
func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error {
+ // For optimization, we are replacing pargzip with pgzip, so newBuiltinDecompressor doesn't have to compare and print warning for every file
+ // since newBuiltinDecompressor is helper method and does not hold any state, it was hard to do it in that method itself.
+ if bm.CompressionEngine == PargzipCompressor {
+ params.Logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
+ bm.CompressionEngine = PgzipCompressor
+ defer func() {
+ bm.CompressionEngine = PargzipCompressor
+ }()
+ }
+
fes := bm.FileEntries
sema := sync2.NewSemaphore(params.Concurrency, 0)
rec := concurrency.AllErrorRecorder{}
@@ -757,6 +770,13 @@ func (be *BuiltinBackupEngine) ShouldDrainForBackup() bool {
return true
}
+// ShouldStartMySQLAfterRestore signifies if this backup engine needs to restart MySQL once the restore is completed.
+func (be *BuiltinBackupEngine) ShouldStartMySQLAfterRestore() bool {
+ return true
+}
+
+func (be *BuiltinBackupEngine) Name() string { return builtinBackupEngineName }
+
func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (mysql.Position, error) {
si, err := ts.GetShard(ctx, keyspace, shard)
if err != nil {
diff --git a/go/vt/mysqlctl/builtinbackupengine_test.go b/go/vt/mysqlctl/builtinbackupengine_test.go
index b6837380db7..280de5ac18f 100644
--- a/go/vt/mysqlctl/builtinbackupengine_test.go
+++ b/go/vt/mysqlctl/builtinbackupengine_test.go
@@ -19,7 +19,6 @@ import (
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
"vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
"vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vttime"
@@ -104,7 +103,7 @@ func TestExecuteBackup(t *testing.T) {
// Spin up a fake daemon to be used in backups. It needs to be allowed to receive:
// "STOP SLAVE", "START SLAVE", in that order.
- mysqld := fakemysqldaemon.NewFakeMysqlDaemon(fakesqldb.New(t))
+ mysqld := mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t))
mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"}
// mysqld.ShutdownTime = time.Minute
diff --git a/go/vt/mysqlctl/capabilityset.go b/go/vt/mysqlctl/capabilityset.go
index 331592a76bc..88b17e4e455 100644
--- a/go/vt/mysqlctl/capabilityset.go
+++ b/go/vt/mysqlctl/capabilityset.go
@@ -51,17 +51,6 @@ func (c *capabilitySet) hasMaria104InstallDb() bool {
return c.isMariaDB() && c.version.atLeast(ServerVersion{Major: 10, Minor: 4, Patch: 0})
}
-// hasDynamicRedoLogCapacity tells you if the version of MySQL in use supports dynamic redo log
-// capacity.
-// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the
-// (/. by default) called "#innodb_redo" and you can
-// dynamically adjust the capacity of redo log space in the running server. See:
-//
-// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity
-func (c *capabilitySet) hasDynamicRedoLogCapacity() bool {
- return c.isMySQLLike() && c.version.atLeast(ServerVersion{Major: 8, Minor: 0, Patch: 30})
-}
-
// IsMySQLLike tests if the server is either MySQL
// or Percona Server. At least currently, Vitess doesn't
// make use of any specific Percona Server features.
diff --git a/go/vt/mysqlctl/compression.go b/go/vt/mysqlctl/compression.go
index 40c4dc344a3..ea8f96cc100 100644
--- a/go/vt/mysqlctl/compression.go
+++ b/go/vt/mysqlctl/compression.go
@@ -65,7 +65,7 @@ var (
)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtbackup", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
servenv.OnParseFor(cmd, registerBackupCompressionFlags)
}
}
@@ -193,7 +193,7 @@ func newExternalDecompressor(ctx context.Context, cmdStr string, reader io.Reade
// This returns a reader that will decompress the underlying provided reader and will use the specified supported engine.
func newBuiltinDecompressor(engine string, reader io.Reader, logger logutil.Logger) (decompressor io.ReadCloser, err error) {
if engine == PargzipCompressor {
- logger.Warningf("engine \"pargzip\" doesn't support decompression, using \"pgzip\" instead")
+ logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
engine = PgzipCompressor
}
diff --git a/go/vt/mysqlctl/fakebackupengine.go b/go/vt/mysqlctl/fakebackupengine.go
new file mode 100644
index 00000000000..f817f255127
--- /dev/null
+++ b/go/vt/mysqlctl/fakebackupengine.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctl
+
+import (
+ "context"
+ "time"
+
+ "vitess.io/vitess/go/vt/mysqlctl/backupstorage"
+)
+
+const fakeBackupEngineName = "fake"
+
+type FakeBackupEngine struct {
+ ExecuteBackupCalls []FakeBackupEngineExecuteBackupCall
+ ExecuteBackupDuration time.Duration
+ ExecuteBackupReturn FakeBackupEngineExecuteBackupReturn
+ ExecuteRestoreCalls []FakeBackupEngineExecuteRestoreCall
+ ExecuteRestoreDuration time.Duration
+ ExecuteRestoreReturn FakeBackupEngineExecuteRestoreReturn
+ ShouldDrainForBackupCalls int
+ ShouldDrainForBackupReturn bool
+}
+
+type FakeBackupEngineExecuteBackupCall struct {
+ BackupParams BackupParams
+ BackupHandle backupstorage.BackupHandle
+}
+
+type FakeBackupEngineExecuteBackupReturn struct {
+ Ok bool
+ Err error
+}
+
+type FakeBackupEngineExecuteRestoreCall struct {
+ BackupHandle backupstorage.BackupHandle
+ RestoreParams RestoreParams
+}
+
+type FakeBackupEngineExecuteRestoreReturn struct {
+ Manifest *BackupManifest
+ Err error
+}
+
+func (be *FakeBackupEngine) ExecuteBackup(
+ ctx context.Context,
+ params BackupParams,
+ bh backupstorage.BackupHandle,
+) (bool, error) {
+ be.ExecuteBackupCalls = append(be.ExecuteBackupCalls, FakeBackupEngineExecuteBackupCall{params, bh})
+
+ if be.ExecuteBackupDuration > 0 {
+ time.Sleep(be.ExecuteBackupDuration)
+ }
+
+ return be.ExecuteBackupReturn.Ok, be.ExecuteBackupReturn.Err
+}
+
+func (be *FakeBackupEngine) ExecuteRestore(
+ ctx context.Context, params RestoreParams,
+ bh backupstorage.BackupHandle,
+) (*BackupManifest, error) {
+ be.ExecuteRestoreCalls = append(be.ExecuteRestoreCalls, FakeBackupEngineExecuteRestoreCall{bh, params})
+
+ // mark restore as in progress
+ if err := createStateFile(params.Cnf); err != nil {
+ return nil, err
+ }
+
+ if be.ExecuteRestoreDuration > 0 {
+ time.Sleep(be.ExecuteRestoreDuration)
+ }
+
+ return be.ExecuteRestoreReturn.Manifest, be.ExecuteRestoreReturn.Err
+}
+
+func (be *FakeBackupEngine) ShouldDrainForBackup() bool {
+ be.ShouldDrainForBackupCalls = be.ShouldDrainForBackupCalls + 1
+ return be.ShouldDrainForBackupReturn
+}
+
+func (be *FakeBackupEngine) Name() string { return fakeBackupEngineName }
+
+func (be *FakeBackupEngine) ShouldStartMySQLAfterRestore() bool {
+ return true
+}
diff --git a/go/vt/mysqlctl/fakebackupstorage.go b/go/vt/mysqlctl/fakebackupstorage.go
new file mode 100644
index 00000000000..a3adff0dbd0
--- /dev/null
+++ b/go/vt/mysqlctl/fakebackupstorage.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctl
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "vitess.io/vitess/go/vt/concurrency"
+ "vitess.io/vitess/go/vt/mysqlctl/backupstorage"
+)
+
+type FakeBackupHandle struct {
+ Dir string
+ NameV string
+ ReadOnly bool
+ Errors concurrency.AllErrorRecorder
+
+ AbortBackupCalls []context.Context
+ AbortBackupReturn error
+ AddFileCalls []FakeBackupHandleAddFileCall
+ AddFileReturn FakeBackupHandleAddFileReturn
+ EndBackupCalls []context.Context
+ EndBackupReturn error
+ ReadFileCalls []FakeBackupHandleReadFileCall
+ ReadFileReturnF func(ctx context.Context, filename string) (io.ReadCloser, error)
+}
+
+type FakeBackupHandleAddFileCall struct {
+ Ctx context.Context
+ Filename string
+ Filesize int64
+}
+
+type FakeBackupHandleAddFileReturn struct {
+ WriteCloser io.WriteCloser
+ Err error
+}
+
+type FakeBackupHandleReadFileCall struct {
+ Ctx context.Context
+ Filename string
+}
+
+func (fbh *FakeBackupHandle) RecordError(err error) {
+ fbh.Errors.RecordError(err)
+}
+
+func (fbh *FakeBackupHandle) HasErrors() bool {
+ return fbh.Errors.HasErrors()
+}
+
+func (fbh *FakeBackupHandle) Error() error {
+ return fbh.Errors.Error()
+}
+
+func (fbh *FakeBackupHandle) Directory() string {
+ return fbh.Dir
+}
+
+func (fbh *FakeBackupHandle) Name() string {
+ return fbh.NameV
+}
+
+func (fbh *FakeBackupHandle) AddFile(ctx context.Context, filename string, filesize int64) (io.WriteCloser, error) {
+ fbh.AddFileCalls = append(fbh.AddFileCalls, FakeBackupHandleAddFileCall{ctx, filename, filesize})
+ return fbh.AddFileReturn.WriteCloser, fbh.AddFileReturn.Err
+}
+
+func (fbh *FakeBackupHandle) EndBackup(ctx context.Context) error {
+ fbh.EndBackupCalls = append(fbh.EndBackupCalls, ctx)
+ return fbh.EndBackupReturn
+}
+
+func (fbh *FakeBackupHandle) AbortBackup(ctx context.Context) error {
+ fbh.AbortBackupCalls = append(fbh.AbortBackupCalls, ctx)
+ return fbh.AbortBackupReturn
+}
+
+func (fbh *FakeBackupHandle) ReadFile(ctx context.Context, filename string) (io.ReadCloser, error) {
+ fbh.ReadFileCalls = append(fbh.ReadFileCalls, FakeBackupHandleReadFileCall{ctx, filename})
+ if fbh.ReadFileReturnF == nil {
+ return nil, fmt.Errorf("FakeBackupHandle has not defined a ReadFileReturnF")
+ }
+ return fbh.ReadFileReturnF(ctx, filename)
+}
+
+type FakeBackupStorage struct {
+ CloseCalls int
+ CloseReturn error
+ ListBackupsCalls []FakeBackupStorageListBackupsCall
+ ListBackupsReturn FakeBackupStorageListBackupsReturn
+ RemoveBackupCalls []FakeBackupStorageRemoveBackupCall
+ RemoveBackupReturn error
+ RemoveBackupReturne error // NOTE(review): apparent typo-duplicate of RemoveBackupReturn above; unused — confirm and remove
+ StartBackupCalls []FakeBackupStorageStartBackupCall
+ StartBackupReturn FakeBackupStorageStartBackupReturn
+}
+
+type FakeBackupStorageListBackupsCall struct {
+ Ctx context.Context
+ Dir string
+}
+
+type FakeBackupStorageListBackupsReturn struct {
+ BackupHandles []backupstorage.BackupHandle
+ Err error
+}
+
+type FakeBackupStorageRemoveBackupCall struct {
+ Ctx context.Context
+ Dir string
+ Name string
+}
+
+type FakeBackupStorageStartBackupCall struct {
+ Ctx context.Context
+ Dir string
+ Name string
+}
+
+type FakeBackupStorageStartBackupReturn struct {
+ BackupHandle backupstorage.BackupHandle
+ Err error
+}
+
+func (fbs *FakeBackupStorage) ListBackups(ctx context.Context, dir string) ([]backupstorage.BackupHandle, error) {
+ fbs.ListBackupsCalls = append(fbs.ListBackupsCalls, FakeBackupStorageListBackupsCall{ctx, dir})
+ return fbs.ListBackupsReturn.BackupHandles, fbs.ListBackupsReturn.Err
+}
+
+func (fbs *FakeBackupStorage) StartBackup(ctx context.Context, dir, name string) (backupstorage.BackupHandle, error) {
+ fbs.StartBackupCalls = append(fbs.StartBackupCalls, FakeBackupStorageStartBackupCall{ctx, dir, name})
+ return fbs.StartBackupReturn.BackupHandle, fbs.StartBackupReturn.Err
+}
+
+func (fbs *FakeBackupStorage) RemoveBackup(ctx context.Context, dir, name string) error {
+ fbs.RemoveBackupCalls = append(fbs.RemoveBackupCalls, FakeBackupStorageRemoveBackupCall{ctx, dir, name})
+ return fbs.RemoveBackupReturn
+}
+
+func (fbs *FakeBackupStorage) Close() error {
+ fbs.CloseCalls = fbs.CloseCalls + 1
+ return fbs.CloseReturn
+}
diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go
similarity index 91%
rename from go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go
rename to go/vt/mysqlctl/fakemysqldaemon.go
index 5402be5e540..5d087bc36ec 100644
--- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go
+++ b/go/vt/mysqlctl/fakemysqldaemon.go
@@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package fakemysqldaemon
+package mysqlctl
import (
+ "errors"
"fmt"
"reflect"
"strings"
@@ -30,7 +31,6 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/dbconnpool"
- "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/mysqlctl/tmutils"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -168,9 +168,16 @@ type FakeMysqlDaemon struct {
// SemiSyncReplicaEnabled represents the state of rpl_semi_sync_slave_enabled.
SemiSyncReplicaEnabled bool
- // TimeoutHook is a func that can be called at the beginning of any method to fake a timeout.
- // all a test needs to do is make it { return context.DeadlineExceeded }
+ // GlobalReadLock is used to test if a lock has been acquired already or not
+ GlobalReadLock bool
+
+ // TimeoutHook is a func that can be called at the beginning of
+ // any method to fake a timeout.
+ // All a test needs to do is make it { return context.DeadlineExceeded }.
TimeoutHook func() error
+
+ // Version is the version that will be returned by GetVersionString.
+ Version string
}
// NewFakeMysqlDaemon returns a FakeMysqlDaemon where mysqld appears
@@ -181,6 +188,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon {
db: db,
Running: true,
IOThreadRunning: true,
+ Version: "8.0.32",
}
if db != nil {
result.appPool = dbconnpool.NewConnectionPool("AppConnPool", 5, time.Minute, 0)
@@ -190,7 +198,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon {
}
// Start is part of the MysqlDaemon interface
-func (fmd *FakeMysqlDaemon) Start(ctx context.Context, cnf *mysqlctl.Mycnf, mysqldArgs ...string) error {
+func (fmd *FakeMysqlDaemon) Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error {
if fmd.Running {
return fmt.Errorf("fake mysql daemon already running")
}
@@ -208,7 +216,7 @@ func (fmd *FakeMysqlDaemon) Start(ctx context.Context, cnf *mysqlctl.Mycnf, mysq
}
// Shutdown is part of the MysqlDaemon interface
-func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *mysqlctl.Mycnf, waitForMysqld bool) error {
+func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error {
if !fmd.Running {
return fmt.Errorf("fake mysql daemon not running")
}
@@ -231,17 +239,17 @@ func (fmd *FakeMysqlDaemon) RunMysqlUpgrade() error {
}
// ReinitConfig is part of the MysqlDaemon interface
-func (fmd *FakeMysqlDaemon) ReinitConfig(ctx context.Context, cnf *mysqlctl.Mycnf) error {
+func (fmd *FakeMysqlDaemon) ReinitConfig(ctx context.Context, cnf *Mycnf) error {
return nil
}
// RefreshConfig is part of the MysqlDaemon interface
-func (fmd *FakeMysqlDaemon) RefreshConfig(ctx context.Context, cnf *mysqlctl.Mycnf) error {
+func (fmd *FakeMysqlDaemon) RefreshConfig(ctx context.Context, cnf *Mycnf) error {
return nil
}
// Wait is part of the MysqlDaemon interface.
-func (fmd *FakeMysqlDaemon) Wait(ctx context.Context, cnf *mysqlctl.Mycnf) error {
+func (fmd *FakeMysqlDaemon) Wait(ctx context.Context, cnf *Mycnf) error {
return nil
}
@@ -345,17 +353,22 @@ func (fmd *FakeMysqlDaemon) IsReadOnly() (bool, error) {
return fmd.ReadOnly, nil
}
+// IsSuperReadOnly is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) IsSuperReadOnly(ctx context.Context) (bool, error) {
+ return fmd.SuperReadOnly, nil
+}
+
// SetReadOnly is part of the MysqlDaemon interface
func (fmd *FakeMysqlDaemon) SetReadOnly(on bool) error {
fmd.ReadOnly = on
return nil
}
-// SetSuperReadOnly is part of the MysqlDaemon interface
-func (fmd *FakeMysqlDaemon) SetSuperReadOnly(on bool) error {
+// SetSuperReadOnly is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) SetSuperReadOnly(ctx context.Context, on bool) (ResetSuperReadOnlyFunc, error) {
fmd.SuperReadOnly = on
fmd.ReadOnly = on
- return nil
+ return nil, nil
}
// StartReplication is part of the MysqlDaemon interface.
@@ -431,7 +444,6 @@ func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host strin
if stopReplicationBefore {
cmds = append(cmds, "STOP SLAVE")
}
- cmds = append(cmds, "RESET SLAVE ALL")
cmds = append(cmds, "FAKE SET MASTER")
if startReplicationAfter {
cmds = append(cmds, "START SLAVE")
@@ -468,6 +480,11 @@ func (fmd *FakeMysqlDaemon) Promote(hookExtraEnv map[string]string) (mysql.Posit
return fmd.PromoteResult, nil
}
+// ExecuteSuperQuery is part of the MysqlDaemon interface
+func (fmd *FakeMysqlDaemon) ExecuteSuperQuery(ctx context.Context, query string) error {
+ return fmd.ExecuteSuperQueryList(ctx, []string{query})
+}
+
// ExecuteSuperQueryList is part of the MysqlDaemon interface
func (fmd *FakeMysqlDaemon) ExecuteSuperQueryList(ctx context.Context, queryList []string) error {
for _, query := range queryList {
@@ -643,6 +660,11 @@ func (fmd *FakeMysqlDaemon) SemiSyncClients() uint32 {
return 0
}
+// SemiSyncExtensionLoaded is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) SemiSyncExtensionLoaded() (bool, error) {
+ return true, nil
+}
+
// SemiSyncSettings is part of the MysqlDaemon interface.
func (fmd *FakeMysqlDaemon) SemiSyncSettings() (timeout uint64, numReplicas uint32) {
return 10000000, 1
@@ -663,3 +685,23 @@ func (fmd *FakeMysqlDaemon) GetVersionString() string {
func (fmd *FakeMysqlDaemon) GetVersionComment(ctx context.Context) string {
return ""
}
+
+// AcquireGlobalReadLock is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) AcquireGlobalReadLock(ctx context.Context) error {
+ if fmd.GlobalReadLock {
+ return errors.New("lock already acquired")
+ }
+
+ fmd.GlobalReadLock = true
+ return nil
+}
+
+// ReleaseGlobalReadLock is part of the MysqlDaemon interface.
+func (fmd *FakeMysqlDaemon) ReleaseGlobalReadLock(ctx context.Context) error {
+ if fmd.GlobalReadLock {
+ fmd.GlobalReadLock = false
+ return nil
+ }
+
+ return errors.New("no read locks acquired yet")
+}
diff --git a/go/vt/mysqlctl/mycnf_gen.go b/go/vt/mysqlctl/mycnf_gen.go
index 3024b3bb5e6..2ac47fe617b 100644
--- a/go/vt/mysqlctl/mycnf_gen.go
+++ b/go/vt/mysqlctl/mycnf_gen.go
@@ -59,7 +59,7 @@ var (
)
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
servenv.OnParseFor(cmd, registerMyCnfFlags)
}
}
diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go
index c4c76224b3b..185aac327ac 100644
--- a/go/vt/mysqlctl/mysql_daemon.go
+++ b/go/vt/mysqlctl/mysql_daemon.go
@@ -57,6 +57,7 @@ type MysqlDaemon interface {
GetGTIDPurged(ctx context.Context) (mysql.Position, error)
SetSemiSyncEnabled(source, replica bool) error
SemiSyncEnabled() (source, replica bool)
+ SemiSyncExtensionLoaded() (bool, error)
SemiSyncStatus() (source, replica bool)
SemiSyncClients() (count uint32)
SemiSyncSettings() (timeout uint64, numReplicas uint32)
@@ -69,8 +70,9 @@ type MysqlDaemon interface {
ResetReplication(ctx context.Context) error
PrimaryPosition() (mysql.Position, error)
IsReadOnly() (bool, error)
+ IsSuperReadOnly(ctx context.Context) (bool, error)
SetReadOnly(on bool) error
- SetSuperReadOnly(on bool) error
+ SetSuperReadOnly(ctx context.Context, on bool) (ResetSuperReadOnlyFunc, error)
SetReplicationPosition(ctx context.Context, pos mysql.Position) error
SetReplicationSource(ctx context.Context, host string, port int, stopReplicationBefore bool, startReplicationAfter bool) error
WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error
@@ -102,6 +104,9 @@ type MysqlDaemon interface {
// GetVersionComment returns the version comment
GetVersionComment(ctx context.Context) string
+ // ExecuteSuperQuery executes a single query, no result
+ ExecuteSuperQuery(ctx context.Context, query string) error
+
// ExecuteSuperQueryList executes a list of queries, no result
ExecuteSuperQueryList(ctx context.Context, queryList []string) error
@@ -114,6 +119,13 @@ type MysqlDaemon interface {
// DisableBinlogPlayback disable playback of binlog events
DisableBinlogPlayback() error
+ // AcquireGlobalReadLock acquires a global read lock and keeps the connection so
+ // as to release it with the function below.
+ AcquireGlobalReadLock(ctx context.Context) error
+
+ // ReleaseGlobalReadLock release a lock acquired with the connection from the above function.
+ ReleaseGlobalReadLock(ctx context.Context) error
+
// Close will close this instance of Mysqld. It will wait for all dba
// queries to be finished.
Close()
diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go
index 2738caed2bd..a3df3172aa5 100644
--- a/go/vt/mysqlctl/mysqld.go
+++ b/go/vt/mysqlctl/mysqld.go
@@ -87,9 +87,10 @@ const maxLogFileSampleSize = 4096
// Mysqld is the object that represents a mysqld daemon running on this server.
type Mysqld struct {
- dbcfgs *dbconfigs.DBConfigs
- dbaPool *dbconnpool.ConnectionPool
- appPool *dbconnpool.ConnectionPool
+ dbcfgs *dbconfigs.DBConfigs
+ dbaPool *dbconnpool.ConnectionPool
+ appPool *dbconnpool.ConnectionPool
+ lockConn *dbconnpool.PooledDBConnection
capabilities capabilitySet
@@ -100,23 +101,35 @@ type Mysqld struct {
}
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver"} {
servenv.OnParseFor(cmd, registerMySQLDFlags)
}
+ for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} {
+ servenv.OnParseFor(cmd, registerReparentFlags)
+ }
+ for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver"} {
+ servenv.OnParseFor(cmd, registerPoolFlags)
+ }
}
func registerMySQLDFlags(fs *pflag.FlagSet) {
- fs.BoolVar(&DisableActiveReparents, "disable_active_reparents", DisableActiveReparents, "if set, do not allow active reparents. Use this to protect a cluster using external reparents.")
- fs.IntVar(&dbaPoolSize, "dba_pool_size", dbaPoolSize, "Size of the connection pool for dba connections")
- fs.DurationVar(&DbaIdleTimeout, "dba_idle_timeout", DbaIdleTimeout, "Idle timeout for dba connections")
- fs.IntVar(&appPoolSize, "app_pool_size", appPoolSize, "Size of the connection pool for app connections")
- fs.DurationVar(&appIdleTimeout, "app_idle_timeout", appIdleTimeout, "Idle timeout for app connections")
fs.DurationVar(&PoolDynamicHostnameResolution, "pool_hostname_resolve_interval", PoolDynamicHostnameResolution, "if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)")
fs.StringVar(&mycnfTemplateFile, "mysqlctl_mycnf_template", mycnfTemplateFile, "template file to use for generating the my.cnf file during server init")
fs.StringVar(&socketFile, "mysqlctl_socket", socketFile, "socket file to use for remote mysqlctl actions (empty for local actions)")
fs.DurationVar(&replicationConnectRetry, "replication_connect_retry", replicationConnectRetry, "how long to wait in between replica reconnect attempts. Only precise to the second.")
}
+func registerReparentFlags(fs *pflag.FlagSet) {
+ fs.BoolVar(&DisableActiveReparents, "disable_active_reparents", DisableActiveReparents, "if set, do not allow active reparents. Use this to protect a cluster using external reparents.")
+}
+
+func registerPoolFlags(fs *pflag.FlagSet) {
+ fs.IntVar(&dbaPoolSize, "dba_pool_size", dbaPoolSize, "Size of the connection pool for dba connections")
+ fs.DurationVar(&DbaIdleTimeout, "dba_idle_timeout", DbaIdleTimeout, "Idle timeout for dba connections")
+ fs.DurationVar(&appIdleTimeout, "app_idle_timeout", appIdleTimeout, "Idle timeout for app connections")
+ fs.IntVar(&appPoolSize, "app_pool_size", appPoolSize, "Size of the connection pool for app connections")
+}
+
// NewMysqld creates a Mysqld object based on the provided configuration
// and connection parameters.
func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld {
diff --git a/go/vt/mysqlctl/mysqlshellbackupengine.go b/go/vt/mysqlctl/mysqlshellbackupengine.go
new file mode 100644
index 00000000000..ecd4c080eba
--- /dev/null
+++ b/go/vt/mysqlctl/mysqlshellbackupengine.go
@@ -0,0 +1,575 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctl
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spf13/pflag"
+
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/mysqlctl/backupstorage"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vterrors"
+)
+
+var (
+ // location to store the mysql shell backup
+ mysqlShellBackupLocation = ""
+ // flags passed to the mysql shell utility, used both on dump/restore
+ mysqlShellFlags = "--defaults-file=/dev/null --js -h localhost"
+ // flags passed to the Dump command, as a JSON string
+ mysqlShellDumpFlags = `{"threads": 4}`
+ // flags passed to the Load command, as a JSON string
+ mysqlShellLoadFlags = `{"threads": 4, "loadUsers": true, "updateGtidSet": "replace", "skipBinlog": true, "progressFile": ""}`
+ // drain a tablet when taking a backup
+ mysqlShellBackupShouldDrain = false
+ // disable redo logging and double write buffer
+ mysqlShellSpeedUpRestore = false
+
+ mysqlShellBackupBinaryName = "mysqlsh"
+
+ // used when checking if we need to create the directory on the local filesystem or not.
+ knownObjectStoreParams = []string{"s3BucketName", "osBucketName", "azureContainerName"}
+
+ MySQLShellPreCheckError = errors.New("MySQLShellPreCheckError")
+
+ // internal databases not backed up by MySQL Shell
+ internalDBs = []string{
+ "information_schema", "mysql", "ndbinfo", "performance_schema", "sys",
+ }
+ // reserved MySQL users https://dev.mysql.com/doc/refman/8.0/en/reserved-accounts.html
+ reservedUsers = []string{
+ "mysql.sys@localhost", "mysql.session@localhost", "mysql.infoschema@localhost",
+ }
+)
+
+// MySQLShellBackupManifest represents a backup.
+type MySQLShellBackupManifest struct {
+ // BackupManifest is an anonymous embedding of the base manifest struct.
+ // Note that the manifest itself doesn't fill the Position field, as we have
+ // no way of fetching that information from mysqlsh at the moment.
+ BackupManifest
+
+ // Location of the backup directory
+ BackupLocation string
+ // Params are the parameters that backup was created with
+ Params string
+}
+
+func init() {
+ BackupRestoreEngineMap[mysqlShellBackupEngineName] = &MySQLShellBackupEngine{}
+
+ for _, cmd := range []string{"vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctldclient"} {
+ servenv.OnParseFor(cmd, registerMysqlShellBackupEngineFlags)
+ }
+}
+
+func registerMysqlShellBackupEngineFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&mysqlShellBackupLocation, "mysql-shell-backup-location", mysqlShellBackupLocation, "location where the backup will be stored")
+ fs.StringVar(&mysqlShellFlags, "mysql-shell-flags", mysqlShellFlags, "execution flags to pass to mysqlsh binary to be used during dump/load")
+ fs.StringVar(&mysqlShellDumpFlags, "mysql-shell-dump-flags", mysqlShellDumpFlags, "flags to pass to mysql shell dump utility. This should be a JSON string and will be saved in the MANIFEST")
+ fs.StringVar(&mysqlShellLoadFlags, "mysql-shell-load-flags", mysqlShellLoadFlags, "flags to pass to mysql shell load utility. This should be a JSON string")
+ fs.BoolVar(&mysqlShellBackupShouldDrain, "mysql-shell-should-drain", mysqlShellBackupShouldDrain, "decide if we should drain while taking a backup or continue to serving traffic")
+ fs.BoolVar(&mysqlShellSpeedUpRestore, "mysql-shell-speedup-restore", mysqlShellSpeedUpRestore, "speed up restore by disabling redo logging and double write buffer during the restore process")
+}
+
+// MySQLShellBackupEngine encapsulates the logic to implement the restoration
+// of a mysql-shell based backup.
+type MySQLShellBackupEngine struct {
+}
+
+const (
+ mysqlShellBackupEngineName = "mysqlshell"
+ mysqlShellLockMessage = "Global read lock has been released"
+)
+
+func (be *MySQLShellBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (result bool, finalErr error) {
+ params.Logger.Infof("Starting ExecuteBackup in %s", params.TabletAlias)
+
+ location := path.Join(mysqlShellBackupLocation, bh.Directory(), bh.Name())
+
+ err := be.backupPreCheck(location)
+ if err != nil {
+ return false, vterrors.Wrap(err, "failed backup precheck")
+ }
+
+ args := []string{}
+ if mysqlShellFlags != "" {
+ args = append(args, strings.Fields(mysqlShellFlags)...)
+ }
+
+ args = append(args, "-e", fmt.Sprintf("util.dumpInstance(%q, %s)",
+ location,
+ mysqlShellDumpFlags,
+ ))
+
+ // to be able to get the consistent GTID sets, we will acquire a global read lock before starting mysql shell.
+ // once we have the lock, we start it and wait until it has acquired and released its global read lock, which
+ // should guarantee that both us and mysql shell are seeing the same executed GTID sets.
+ // after this we release the lock so that replication can continue. this usually should take just a few seconds.
+ params.Logger.Infof("acquiring a global read lock before fetching the executed GTID sets")
+ err = params.Mysqld.AcquireGlobalReadLock(ctx)
+ if err != nil {
+ return false, vterrors.Wrap(err, "failed to acquire read lock to start backup")
+ }
+ lockAcquired := time.Now() // we will report how long we hold the lock for
+
+ // we need to release the global read lock in case the backup fails to start and
+ // the lock wasn't released by releaseReadLock() yet. context might be expired,
+ // so we pass a new one.
+ defer func() { _ = params.Mysqld.ReleaseGlobalReadLock(context.Background()) }()
+
+ posBeforeBackup, err := params.Mysqld.PrimaryPosition()
+ if err != nil {
+ return false, vterrors.Wrap(err, "failed to fetch position")
+ }
+
+ cmd := exec.CommandContext(ctx, mysqlShellBackupBinaryName, args...)
+
+ params.Logger.Infof("running %s", cmd.String())
+
+ cmdOut, err := cmd.StdoutPipe()
+ if err != nil {
+ return false, vterrors.Wrap(err, "cannot create stdout pipe")
+ }
+ cmdOriginalErr, err := cmd.StderrPipe()
+ if err != nil {
+ return false, vterrors.Wrap(err, "cannot create stderr pipe")
+ }
+ if err := cmd.Start(); err != nil {
+ return false, vterrors.Wrap(err, "can't start mysqlshell")
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+ cmdErr := io.TeeReader(cmdOriginalErr, pipeWriter)
+
+ cmdWg := &sync.WaitGroup{}
+ cmdWg.Add(3)
+ go releaseReadLock(ctx, pipeReader, params, cmdWg, lockAcquired)
+ go scanLinesToLogger(mysqlShellBackupEngineName+" stdout", cmdOut, params.Logger, cmdWg.Done)
+ go scanLinesToLogger(mysqlShellBackupEngineName+" stderr", cmdErr, params.Logger, cmdWg.Done)
+
+ // Get exit status.
+ if err := cmd.Wait(); err != nil {
+ pipeWriter.Close() // make sure we close the writer so the goroutines above will complete.
+ return false, vterrors.Wrap(err, mysqlShellBackupEngineName+" failed")
+ }
+
+ // close the pipeWriter and wait for the goroutines to have read all the logs
+ pipeWriter.Close()
+ cmdWg.Wait()
+
+ // open the MANIFEST
+ params.Logger.Infof("Writing backup MANIFEST")
+ mwc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown)
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName)
+ }
+ defer closeFile(mwc, backupManifestFileName, params.Logger, &finalErr)
+
+ // JSON-encode and write the MANIFEST
+ bm := &MySQLShellBackupManifest{
+ // Common base fields
+ BackupManifest: BackupManifest{
+ BackupMethod: mysqlShellBackupEngineName,
+ // the position is empty here because we have no way of capturing it from mysqlsh
+ // we will capture it when doing the restore as mysqlsh can replace the GTIDs with
+ // what it has stored in the backup.
+ Position: posBeforeBackup,
+ // PurgedPosition: posBeforeBackup,
+ BackupTime: FormatRFC3339(params.BackupTime.UTC()),
+ FinishedTime: FormatRFC3339(time.Now().UTC()),
+ // ServerUUID: serverUUID,
+ // TabletAlias: params.TabletAlias,
+ // Keyspace: params.Keyspace,
+ // Shard: params.Shard,
+ // MySQLVersion: mysqlVersion,
+ // UpgradeSafe: true,
+ },
+
+ // mysql shell backup specific fields
+ BackupLocation: location,
+ Params: mysqlShellLoadFlags,
+ }
+
+ data, err := json.MarshalIndent(bm, "", " ")
+ if err != nil {
+ return false, vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName)
+ }
+ if _, err := mwc.Write([]byte(data)); err != nil {
+ return false, vterrors.Wrapf(err, "cannot write %v", backupManifestFileName)
+ }
+
+ params.Logger.Infof("Backup completed")
+ return true, nil
+}
+
+func (be *MySQLShellBackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) {
+ params.Logger.Infof("Calling ExecuteRestore for %s (DeleteBeforeRestore: %v)", params.DbName, params.DeleteBeforeRestore)
+
+ shouldDeleteUsers, err := be.restorePreCheck(ctx, params)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "failed restore precheck")
+ }
+
+ var bm MySQLShellBackupManifest
+ if err := getBackupManifestInto(ctx, bh, &bm); err != nil {
+ return nil, err
+ }
+
+ // mark restore as in progress
+ if err := createStateFile(params.Cnf); err != nil {
+ return nil, err
+ }
+
+ // make sure semi-sync is disabled, otherwise we will wait forever for acknowledgements
+ err = params.Mysqld.SetSemiSyncEnabled(false, false)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "disable semi-sync failed")
+ }
+
+ params.Logger.Infof("restoring on an existing tablet, so dropping database %q", params.DbName)
+
+ readonly, err := params.Mysqld.IsSuperReadOnly(ctx)
+ if err != nil {
+ return nil, vterrors.Wrap(err, fmt.Sprintf("checking if mysqld has super_read_only=enable: %v", err))
+ }
+
+ if readonly {
+ resetFunc, err := params.Mysqld.SetSuperReadOnly(ctx, false)
+ if err != nil {
+ return nil, vterrors.Wrap(err, fmt.Sprintf("unable to disable super-read-only: %v", err))
+ }
+
+ defer func() {
+ err := resetFunc()
+ if err != nil {
+ params.Logger.Errorf("Not able to set super_read_only to its original value after restore")
+ }
+ }()
+ }
+
+ err = cleanupMySQL(ctx, params, shouldDeleteUsers)
+ if err != nil {
+ log.Errorf(err.Error())
+ // time.Sleep(time.Minute * 2)
+ return nil, vterrors.Wrap(err, "error cleaning MySQL")
+ }
+
+ // we need to get rid of all the current replication information on the host.
+ err = params.Mysqld.ResetReplication(ctx)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "unable to reset replication")
+ }
+
+ // this is required so we can load the backup generated by MySQL Shell. we will disable it afterwards.
+ err = params.Mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL LOCAL_INFILE=1")
+ if err != nil {
+ return nil, vterrors.Wrap(err, "unable to set local_infile=1")
+ }
+
+ if mysqlShellSpeedUpRestore {
+ // disable redo logging and double write buffer if we are configured to do so.
+ err = params.Mysqld.ExecuteSuperQuery(ctx, "ALTER INSTANCE DISABLE INNODB REDO_LOG")
+ if err != nil {
+ return nil, vterrors.Wrap(err, "unable to disable REDO_LOG")
+ }
+ params.Logger.Infof("Disabled REDO_LOG")
+
+ defer func() { // re-enable once we are done with the restore.
+ err := params.Mysqld.ExecuteSuperQuery(ctx, "ALTER INSTANCE ENABLE INNODB REDO_LOG")
+ if err != nil {
+ params.Logger.Errorf("unable to re-enable REDO_LOG: %v", err)
+ } else {
+ params.Logger.Infof("Disabled REDO_LOG")
+ }
+ }()
+ }
+
+ // we need to disable SuperReadOnly otherwise we won't be able to restore the backup properly.
+ // once the backup is complete, we will restore it to its previous state.
+ resetFunc, err := be.handleSuperReadOnly(ctx, params)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "unable to disable super-read-only")
+ }
+ defer resetFunc()
+
+ args := []string{}
+
+ if mysqlShellFlags != "" {
+ args = append(args, strings.Fields(mysqlShellFlags)...)
+ }
+
+ args = append(args, "-e", fmt.Sprintf("util.loadDump(%q, %s)",
+ bm.BackupLocation,
+ mysqlShellLoadFlags,
+ ))
+
+ cmd := exec.CommandContext(ctx, "mysqlsh", args...)
+
+ params.Logger.Infof("running %s", cmd.String())
+
+ cmdOut, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, vterrors.Wrap(err, "cannot create stdout pipe")
+ }
+ cmdErr, err := cmd.StderrPipe()
+ if err != nil {
+ return nil, vterrors.Wrap(err, "cannot create stderr pipe")
+ }
+ if err := cmd.Start(); err != nil {
+ return nil, vterrors.Wrap(err, "can't start xbstream")
+ }
+
+ cmdWg := &sync.WaitGroup{}
+ cmdWg.Add(2)
+ go scanLinesToLogger(mysqlShellBackupEngineName+" stdout", cmdOut, params.Logger, cmdWg.Done)
+ go scanLinesToLogger(mysqlShellBackupEngineName+" stderr", cmdErr, params.Logger, cmdWg.Done)
+ cmdWg.Wait()
+
+ // Get the exit status.
+ if err := cmd.Wait(); err != nil {
+ return nil, vterrors.Wrap(err, mysqlShellBackupEngineName+" failed")
+ }
+ params.Logger.Infof("%s completed successfully", mysqlShellBackupBinaryName)
+
+ // disable local_infile now that the restore is done.
+ err = params.Mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL LOCAL_INFILE=0")
+ if err != nil {
+ return nil, vterrors.Wrap(err, "unable to set local_infile=0")
+ }
+ params.Logger.Infof("set local_infile=0")
+
+ params.Logger.Infof("Restore completed")
+
+ return &bm.BackupManifest, nil
+}
+
+// ShouldDrainForBackup satisfies the BackupEngine interface
+// MySQL Shell backups can be taken while MySQL is running so we can control this via a flag.
+func (be *MySQLShellBackupEngine) ShouldDrainForBackup() bool {
+ return mysqlShellBackupShouldDrain
+}
+
+// ShouldStartMySQLAfterRestore signifies if this backup engine needs to restart MySQL once the restore is completed.
+// Since MySQL Shell operates on a live MySQL instance, there is no need to start it once the restore is completed
+func (be *MySQLShellBackupEngine) ShouldStartMySQLAfterRestore() bool {
+ return false
+}
+
+func (be *MySQLShellBackupEngine) Name() string { return mysqlShellBackupEngineName }
+
+func (be *MySQLShellBackupEngine) backupPreCheck(location string) error {
+ if mysqlShellBackupLocation == "" {
+ return fmt.Errorf("%w: no backup location set via --mysql-shell-backup-location", MySQLShellPreCheckError)
+ }
+
+ if mysqlShellFlags == "" || !strings.Contains(mysqlShellFlags, "--js") {
+ return fmt.Errorf("%w: at least the --js flag is required in the value of the flag --mysql-shell-flags", MySQLShellPreCheckError)
+ }
+
+ // make sure the target directory exists if the target location for the backup is not an object store
+ // (e.g. is the local filesystem) as MySQL Shell doesn't create the entire path beforehand:
+ isObjectStorage := false
+ for _, objStore := range knownObjectStoreParams {
+ if strings.Contains(mysqlShellDumpFlags, objStore) {
+ isObjectStorage = true
+ break
+ }
+ }
+
+ if !isObjectStorage {
+ err := os.MkdirAll(location, 0o750)
+ if err != nil {
+ return fmt.Errorf("failure creating directory %s: %w", location, err)
+ }
+ }
+
+ return nil
+}
+
+func (be *MySQLShellBackupEngine) restorePreCheck(ctx context.Context, params RestoreParams) (shouldDeleteUsers bool, err error) {
+ if mysqlShellFlags == "" {
+ return shouldDeleteUsers, fmt.Errorf("%w: at least the --js flag is required in the value of the flag --mysql-shell-flags", MySQLShellPreCheckError)
+ }
+
+ loadFlags := map[string]interface{}{}
+ err = json.Unmarshal([]byte(mysqlShellLoadFlags), &loadFlags)
+ if err != nil {
+ return false, fmt.Errorf("%w: unable to parse JSON of load flags", MySQLShellPreCheckError)
+ }
+
+ if val, ok := loadFlags["updateGtidSet"]; !ok || val != "replace" {
+ return false, fmt.Errorf("%w: mysql-shell needs to restore with updateGtidSet set to \"replace\" to work with Vitess", MySQLShellPreCheckError)
+ }
+
+ if val, ok := loadFlags["progressFile"]; !ok || val != "" {
+ return false, fmt.Errorf("%w: \"progressFile\" needs to be empty as vitess always starts a restore from scratch", MySQLShellPreCheckError)
+ }
+
+ if val, ok := loadFlags["skipBinlog"]; !ok || val != true {
+ return false, fmt.Errorf("%w: \"skipBinlog\" needs to set to true", MySQLShellPreCheckError)
+ }
+
+ if val, ok := loadFlags["loadUsers"]; ok && val == true {
+ shouldDeleteUsers = true
+ }
+
+ return shouldDeleteUsers, nil
+}
+
+func (be *MySQLShellBackupEngine) handleSuperReadOnly(ctx context.Context, params RestoreParams) (func(), error) {
+ readonly, err := params.Mysqld.IsSuperReadOnly(ctx)
+ if err != nil {
+ return nil, vterrors.Wrap(err, fmt.Sprintf("checking if mysqld has super_read_only=enable: %v", err))
+ }
+
+ params.Logger.Infof("Is Super Read Only: %v", readonly)
+
+ if readonly {
+ resetFunc, err := params.Mysqld.SetSuperReadOnly(ctx, false)
+ if err != nil {
+ return nil, vterrors.Wrap(err, fmt.Sprintf("unable to disable super-read-only: %v", err))
+ }
+
+ return func() {
+ err := resetFunc()
+ if err != nil {
+ params.Logger.Errorf("Not able to set super_read_only to its original value after restore")
+ }
+ }, nil
+ }
+
+ return func() {}, nil
+}
+
+// releaseReadLock keeps reading the MySQL Shell STDERR until it reports having released its global read lock, then releases our own lock as well
+func releaseReadLock(ctx context.Context, reader io.Reader, params BackupParams, wg *sync.WaitGroup, lockAcquired time.Time) {
+ defer wg.Done()
+
+ scanner := bufio.NewScanner(reader)
+ released := false
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if !released {
+
+ if !strings.Contains(line, mysqlShellLockMessage) {
+ continue
+ }
+ released = true
+
+ params.Logger.Infof("mysql shell released its global read lock, doing the same")
+
+ err := params.Mysqld.ReleaseGlobalReadLock(ctx)
+ if err != nil {
+ params.Logger.Errorf("unable to release global read lock: %v", err)
+ }
+
+ params.Logger.Infof("global read lock released after %v", time.Since(lockAcquired))
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ params.Logger.Errorf("error reading from reader: %v", err)
+ }
+
+ if !released {
+ params.Logger.Errorf("could not release global lock earlier")
+ }
+}
+
+func cleanupMySQL(ctx context.Context, params RestoreParams, shouldDeleteUsers bool) error {
+ params.Logger.Infof("Cleaning up MySQL ahead of a restore")
+ result, err := params.Mysqld.FetchSuperQuery(ctx, "SHOW DATABASES")
+ if err != nil {
+ return err
+ }
+
+ // drop all databases
+ for _, row := range result.Rows {
+ dbName := row[0].ToString()
+ if sliceContains(internalDBs, dbName) {
+ continue // not dropping internal DBs
+ }
+
+ params.Logger.Infof("Dropping DB %q", dbName)
+ err = params.Mysqld.ExecuteSuperQuery(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", row[0].ToString()))
+ if err != nil {
+ return fmt.Errorf("error droppping database %q: %w", row[0].ToString(), err)
+ }
+ }
+
+ if shouldDeleteUsers {
+ // get current user
+ var currentUser string
+ result, err = params.Mysqld.FetchSuperQuery(ctx, "SELECT user()")
+ if err != nil {
+ return fmt.Errorf("error fetching current user: %w", err)
+ }
+
+ for _, row := range result.Rows {
+ currentUser = row[0].ToString()
+ }
+
+ // drop all users except reserved ones
+ result, err = params.Mysqld.FetchSuperQuery(ctx, "SELECT user, host FROM mysql.user")
+ if err != nil {
+ return err
+ }
+
+ for _, row := range result.Rows {
+ user := fmt.Sprintf("%s@%s", row[0].ToString(), row[1].ToString())
+
+ if user == currentUser {
+ continue // we don't drop the current user
+ }
+ if sliceContains(reservedUsers, user) {
+ continue // we skip reserved MySQL users
+ }
+
+ params.Logger.Infof("Dropping User %q", user)
+ err = params.Mysqld.ExecuteSuperQuery(ctx, fmt.Sprintf("DROP USER '%s'@'%s'", row[0].ToString(), row[1].ToString()))
+ if err != nil {
+ return fmt.Errorf("error droppping user %q: %w", user, err)
+ }
+ }
+ }
+
+ return err
+}
+
+func sliceContains[S ~[]E, E comparable](s S, v E) bool {
+ for _, item := range s {
+ if item == v {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/go/vt/mysqlctl/mysqlshellbackupengine_test.go b/go/vt/mysqlctl/mysqlshellbackupengine_test.go
new file mode 100644
index 00000000000..ceacab49d60
--- /dev/null
+++ b/go/vt/mysqlctl/mysqlshellbackupengine_test.go
@@ -0,0 +1,433 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysqlctl
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/ioutil"
+ "vitess.io/vitess/go/mysql/fakesqldb"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/logutil"
+)
+
+func TestMySQLShellBackupBackupPreCheck(t *testing.T) {
+ originalLocation := mysqlShellBackupLocation
+ originalFlags := mysqlShellFlags
+ defer func() {
+ mysqlShellBackupLocation = originalLocation
+ mysqlShellFlags = originalFlags
+ }()
+
+ engine := MySQLShellBackupEngine{}
+ tests := []struct {
+ name string
+ location string
+ flags string
+ err error
+ }{
+ {
+ "empty flags",
+ "",
+ `{}`,
+ MySQLShellPreCheckError,
+ },
+ {
+ "only location",
+ "/dev/null",
+ "",
+ MySQLShellPreCheckError,
+ },
+ {
+ "only flags",
+ "",
+ "--js",
+ MySQLShellPreCheckError,
+ },
+ {
+ "both values present but without --js",
+ "",
+ "-h localhost",
+ MySQLShellPreCheckError,
+ },
+ {
+ "supported values",
+ t.TempDir(),
+ "--js -h localhost",
+ nil,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ mysqlShellBackupLocation = tt.location
+ mysqlShellFlags = tt.flags
+ assert.ErrorIs(t, engine.backupPreCheck(path.Join(mysqlShellBackupLocation, "test")), tt.err)
+ })
+ }
+
+}
+
+func TestMySQLShellBackupRestorePreCheck(t *testing.T) {
+ original := mysqlShellLoadFlags
+ defer func() { mysqlShellLoadFlags = original }()
+
+ engine := MySQLShellBackupEngine{}
+ tests := []struct {
+ name string
+ flags string
+ err error
+ shouldDeleteUsers bool
+ }{
+ {
+ "empty load flags",
+ `{}`,
+ MySQLShellPreCheckError,
+ false,
+ },
+ {
+ "only updateGtidSet",
+ `{"updateGtidSet": "replace"}`,
+ MySQLShellPreCheckError,
+ false,
+ },
+ {
+ "only progressFile",
+ `{"progressFile": ""}`,
+ MySQLShellPreCheckError,
+ false,
+ },
+ {
+ "both values but unsupported values",
+ `{"updateGtidSet": "append", "progressFile": "/tmp/test1"}`,
+ MySQLShellPreCheckError,
+ false,
+ },
+ {
+ "supported values",
+ `{"updateGtidSet": "replace", "progressFile": "", "skipBinlog": true, "loadUsers": false}`,
+ nil,
+ false,
+ },
+ {
+ "should delete users",
+ `{"updateGtidSet": "replace", "progressFile": "", "skipBinlog": true, "loadUsers": true}`,
+ nil,
+ true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mysqlShellLoadFlags = tt.flags
+ shouldDeleteUsers, err := engine.restorePreCheck(context.Background(), RestoreParams{})
+ assert.ErrorIs(t, err, tt.err)
+ assert.Equal(t, tt.shouldDeleteUsers, shouldDeleteUsers)
+ })
+ }
+
+}
+
+func TestShouldDrainForBackupMySQLShell(t *testing.T) {
+ original := mysqlShellBackupShouldDrain
+ defer func() { mysqlShellBackupShouldDrain = original }()
+
+ engine := MySQLShellBackupEngine{}
+
+ mysqlShellBackupShouldDrain = false
+
+ assert.False(t, engine.ShouldDrainForBackup())
+
+ mysqlShellBackupShouldDrain = true
+
+ assert.True(t, engine.ShouldDrainForBackup())
+}
+
+func TestCleanupMySQL(t *testing.T) {
+ type userRecord struct {
+ user, host string
+ }
+
+ tests := []struct {
+ name string
+ existingDBs []string
+ expectedDropDBs []string
+ currentUser string
+ existingUsers []userRecord
+ expectedDropUsers []string
+ shouldDeleteUsers bool
+ }{
+ {
+ name: "testing only specific DBs",
+ existingDBs: []string{"_vt", "vt_test"},
+ expectedDropDBs: []string{"_vt", "vt_test"},
+ },
+ {
+ name: "testing with internal dbs",
+ existingDBs: []string{"_vt", "mysql", "vt_test", "performance_schema"},
+ expectedDropDBs: []string{"_vt", "vt_test"},
+ },
+ {
+ name: "with users but without delete",
+ existingDBs: []string{"_vt", "mysql", "vt_test", "performance_schema"},
+ expectedDropDBs: []string{"_vt", "vt_test"},
+ existingUsers: []userRecord{
+ {"test", "localhost"},
+ {"app", "10.0.0.1"},
+ },
+ expectedDropUsers: []string{},
+ shouldDeleteUsers: false,
+ },
+ {
+ name: "with users and delete",
+ existingDBs: []string{"_vt", "mysql", "vt_test", "performance_schema"},
+ expectedDropDBs: []string{"_vt", "vt_test"},
+ existingUsers: []userRecord{
+ {"test", "localhost"},
+ {"app", "10.0.0.1"},
+ },
+ expectedDropUsers: []string{"'test'@'localhost'", "'app'@'10.0.0.1'"},
+ shouldDeleteUsers: true,
+ },
+ {
+ name: "with reserved users",
+ existingDBs: []string{"_vt", "mysql", "vt_test", "performance_schema"},
+ expectedDropDBs: []string{"_vt", "vt_test"},
+ existingUsers: []userRecord{
+ {"mysql.sys", "localhost"},
+ {"mysql.infoschema", "localhost"},
+ {"mysql.session", "localhost"},
+ {"test", "localhost"},
+ {"app", "10.0.0.1"},
+ },
+ expectedDropUsers: []string{"'test'@'localhost'", "'app'@'10.0.0.1'"},
+ shouldDeleteUsers: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fakedb := fakesqldb.New(t)
+ defer fakedb.Close()
+ mysql := NewFakeMysqlDaemon(fakedb)
+ defer mysql.Close()
+
+ databases := [][]sqltypes.Value{}
+ for _, db := range tt.existingDBs {
+ databases = append(databases, []sqltypes.Value{sqltypes.NewVarChar(db)})
+ }
+
+ users := [][]sqltypes.Value{}
+ for _, record := range tt.existingUsers {
+ users = append(users, []sqltypes.Value{sqltypes.NewVarChar(record.user), sqltypes.NewVarChar(record.host)})
+ }
+
+ mysql.FetchSuperQueryMap = map[string]*sqltypes.Result{
+ "SHOW DATABASES": {Rows: databases},
+ "SELECT user()": {Rows: [][]sqltypes.Value{{sqltypes.NewVarChar(tt.currentUser)}}},
+ "SELECT user, host FROM mysql.user": {Rows: users},
+ }
+
+ for _, drop := range tt.expectedDropDBs {
+ mysql.ExpectedExecuteSuperQueryList = append(mysql.ExpectedExecuteSuperQueryList,
+ fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", drop),
+ )
+ }
+
+ if tt.shouldDeleteUsers {
+ for _, drop := range tt.expectedDropUsers {
+ mysql.ExpectedExecuteSuperQueryList = append(mysql.ExpectedExecuteSuperQueryList,
+ fmt.Sprintf("DROP USER %s", drop),
+ )
+ }
+ }
+
+ params := RestoreParams{
+ Mysqld: mysql,
+ Logger: logutil.NewMemoryLogger(),
+ }
+
+ err := cleanupMySQL(context.Background(), params, tt.shouldDeleteUsers)
+ require.NoError(t, err)
+
+ require.Equal(t, len(tt.expectedDropDBs)+len(tt.expectedDropUsers), mysql.ExpectedExecuteSuperQueryCurrent,
+ "unexpected number of queries executed")
+ })
+ }
+
+}
+
+// this is a helper to write files in a temporary directory
+func generateTestFile(t *testing.T, name, contents string) {
+ f, err := os.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0700)
+ require.NoError(t, err)
+ defer f.Close()
+ _, err = f.WriteString(contents)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+}
+
+// This tests if we are properly releasing the global read lock we acquire
+// during ExecuteBackup(), even if the backup didn't succeed.
+func TestMySQLShellBackupEngine_ExecuteBackup_ReleaseLock(t *testing.T) {
+ originalLocation := mysqlShellBackupLocation
+ originalBinary := mysqlShellBackupBinaryName
+ mysqlShellBackupLocation = "logical"
+ mysqlShellBackupBinaryName = path.Join(t.TempDir(), "test.sh")
+
+ defer func() { // restore the original values.
+ mysqlShellBackupLocation = originalLocation
+ mysqlShellBackupBinaryName = originalBinary
+ }()
+
+ logger := logutil.NewMemoryLogger()
+ fakedb := fakesqldb.New(t)
+ defer fakedb.Close()
+ mysql := NewFakeMysqlDaemon(fakedb)
+ defer mysql.Close()
+
+ be := &MySQLShellBackupEngine{}
+ params := BackupParams{
+ TabletAlias: "test",
+ Logger: logger,
+ Mysqld: mysql,
+ }
+ bs := FakeBackupStorage{
+ StartBackupReturn: FakeBackupStorageStartBackupReturn{},
+ }
+
+ t.Run("lock released if we see the mysqlsh lock being acquired", func(t *testing.T) {
+ logger.Clear()
+ manifestBuffer := ioutil.NewBytesBufferWriter()
+ bs.StartBackupReturn.BackupHandle = &FakeBackupHandle{
+ Dir: t.TempDir(),
+ AddFileReturn: FakeBackupHandleAddFileReturn{WriteCloser: manifestBuffer},
+ }
+
+ // this simulates mysql shell completing without any issues.
+ generateTestFile(t, mysqlShellBackupBinaryName, fmt.Sprintf("#!/bin/bash\n>&2 echo %s", mysqlShellLockMessage))
+
+ bh, err := bs.StartBackup(context.Background(), t.TempDir(), t.Name())
+ require.NoError(t, err)
+
+ _, err = be.ExecuteBackup(context.Background(), params, bh)
+ require.NoError(t, err)
+ require.False(t, mysql.GlobalReadLock) // lock must be released.
+
+ // check the manifest is valid.
+ var manifest MySQLShellBackupManifest
+ err = json.Unmarshal(manifestBuffer.Bytes(), &manifest)
+ require.NoError(t, err)
+
+ require.Equal(t, mysqlShellBackupEngineName, manifest.BackupMethod)
+
+ // did we notice the lock was released and did we release ours as well?
+ require.Contains(t, logger.String(), "global read lock released after",
+ "failed to release the global lock after mysqlsh")
+ })
+
+ t.Run("lock released if when we don't see mysqlsh released it", func(t *testing.T) {
+ mysql.GlobalReadLock = false // clear lock status.
+ logger.Clear()
+ manifestBuffer := ioutil.NewBytesBufferWriter()
+ bs.StartBackupReturn.BackupHandle = &FakeBackupHandle{
+ Dir: t.TempDir(),
+ AddFileReturn: FakeBackupHandleAddFileReturn{WriteCloser: manifestBuffer},
+ }
+
+ // this simulates mysqlshell completing, but we don't see the message that it released its lock.
+ generateTestFile(t, mysqlShellBackupBinaryName, "#!/bin/bash\nexit 0")
+
+ bh, err := bs.StartBackup(context.Background(), t.TempDir(), t.Name())
+ require.NoError(t, err)
+
+ // in this case the backup was successful, but even if we didn't see mysqlsh release its lock
+ // we make sure it is released at the end.
+ _, err = be.ExecuteBackup(context.Background(), params, bh)
+ require.NoError(t, err)
+ require.False(t, mysql.GlobalReadLock) // lock must be released.
+
+ // make sure we are at least logging the lock wasn't able to be released earlier.
+ require.Contains(t, logger.String(), "could not release global lock earlier",
+ "failed to log error message when unable to release lock during backup")
+ })
+
+ t.Run("lock released when backup fails", func(t *testing.T) {
+ mysql.GlobalReadLock = false // clear lock status.
+ logger.Clear()
+ manifestBuffer := ioutil.NewBytesBufferWriter()
+ bs.StartBackupReturn.BackupHandle = &FakeBackupHandle{
+ Dir: t.TempDir(),
+ AddFileReturn: FakeBackupHandleAddFileReturn{WriteCloser: manifestBuffer},
+ }
+
+ // this simulates the backup process failing.
+ generateTestFile(t, mysqlShellBackupBinaryName, "#!/bin/bash\nexit 1")
+
+ bh, err := bs.StartBackup(context.Background(), t.TempDir(), t.Name())
+ require.NoError(t, err)
+
+ _, err = be.ExecuteBackup(context.Background(), params, bh)
+ require.ErrorContains(t, err, "mysqlshell failed")
+ require.False(t, mysql.GlobalReadLock) // lock must be released.
+ })
+
+}
+
+func Test_sliceContains(t *testing.T) {
+ tests := []struct {
+ slice []any
+ value any
+ want bool
+ }{
+ {
+ []any{"apple", "banana", "cherry"},
+ "apple",
+ true,
+ },
+ {
+ []any{"apple", "banana", "cherry"},
+ "banana",
+ true,
+ },
+ {
+ []any{"apple", "banana", "cherry"},
+ "cherry",
+ true,
+ },
+ {
+ []any{"apple", "banana", "cherry"},
+ "dragonfruit",
+ false,
+ },
+ }
+
+ for i, tt := range tests {
+ t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
+ assert.Equal(t, tt.want, sliceContains(tt.slice, tt.value))
+ })
+ }
+}
diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go
index 311828b4535..69799e7c717 100644
--- a/go/vt/mysqlctl/query.go
+++ b/go/vt/mysqlctl/query.go
@@ -17,6 +17,7 @@ limitations under the License.
package mysqlctl
import (
+ "errors"
"fmt"
"strings"
"time"
@@ -228,6 +229,42 @@ func (mysqld *Mysqld) fetchStatuses(ctx context.Context, pattern string) (map[st
return varMap, nil
}
+// AcquireGlobalReadLock acquires a global read lock and keeps the connection so it can be released later.
+func (mysqld *Mysqld) AcquireGlobalReadLock(ctx context.Context) error {
+ if mysqld.lockConn != nil {
+ return errors.New("lock already acquired")
+ }
+
+ conn, err := getPoolReconnect(ctx, mysqld.dbaPool)
+ if err != nil {
+ return err
+ }
+
+ err = mysqld.executeSuperQueryListConn(ctx, conn, []string{"FLUSH TABLES WITH READ LOCK"})
+ if err != nil {
+ conn.Recycle()
+ return err
+ }
+
+ mysqld.lockConn = conn
+ return nil
+}
+
+func (mysqld *Mysqld) ReleaseGlobalReadLock(ctx context.Context) error {
+ if mysqld.lockConn == nil {
+ return errors.New("no read locks acquired yet")
+ }
+
+ err := mysqld.executeSuperQueryListConn(ctx, mysqld.lockConn, []string{"UNLOCK TABLES"})
+ if err != nil {
+ return err
+ }
+
+ mysqld.lockConn.Recycle()
+ mysqld.lockConn = nil
+ return nil
+}
+
const (
masterPasswordStart = " MASTER_PASSWORD = '"
masterPasswordEnd = "',\n"
diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go
index 4aba50b8903..28970e1362d 100644
--- a/go/vt/mysqlctl/replication.go
+++ b/go/vt/mysqlctl/replication.go
@@ -38,6 +38,8 @@ import (
"vitess.io/vitess/go/vt/log"
)
+type ResetSuperReadOnlyFunc func() error
+
// WaitForReplicationStart waits until the deadline for replication to start.
// This validates the current primary is correct and can be connected to.
func WaitForReplicationStart(mysqld MysqlDaemon, replicaStartDeadline int) error {
@@ -231,6 +233,23 @@ func (mysqld *Mysqld) IsReadOnly() (bool, error) {
return false, nil
}
+// IsSuperReadOnly return true if the instance is super read only
+func (mysqld *Mysqld) IsSuperReadOnly(ctx context.Context) (bool, error) {
+ qr, err := mysqld.FetchSuperQuery(ctx, "SELECT @@global.super_read_only")
+ if err != nil {
+ return false, err
+ }
+
+ if len(qr.Rows) == 1 {
+ sro := qr.Rows[0][0].ToString()
+ if sro == "1" || sro == "ON" {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
// SetReadOnly set/unset the read_only flag
func (mysqld *Mysqld) SetReadOnly(on bool) error {
query := "SET GLOBAL read_only = "
@@ -242,15 +261,52 @@ func (mysqld *Mysqld) SetReadOnly(on bool) error {
return mysqld.ExecuteSuperQuery(context.TODO(), query)
}
-// SetSuperReadOnly set/unset the super_read_only flag
-func (mysqld *Mysqld) SetSuperReadOnly(on bool) error {
+// SetSuperReadOnly set/unset the super_read_only flag.
+// Returns a function which is called to set super_read_only back to its original value.
+func (mysqld *Mysqld) SetSuperReadOnly(ctx context.Context, on bool) (ResetSuperReadOnlyFunc, error) {
+ // return function for switching `OFF` super_read_only
+ var resetFunc ResetSuperReadOnlyFunc
+ var disableFunc = func() error {
+ query := "SET GLOBAL super_read_only = 'OFF'"
+ err := mysqld.ExecuteSuperQuery(context.Background(), query)
+ return err
+ }
+
+ // return function for switching `ON` super_read_only.
+ var enableFunc = func() error {
+ query := "SET GLOBAL super_read_only = 'ON'"
+ err := mysqld.ExecuteSuperQuery(context.Background(), query)
+ return err
+ }
+
+ superReadOnlyEnabled, err := mysqld.IsSuperReadOnly(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // If non-idempotent then set the right call-back.
+ // We are asked to turn on super_read_only but original value is false,
+ // therefore return disableFunc, that can be used as defer by caller.
+ if on && !superReadOnlyEnabled {
+ resetFunc = disableFunc
+ }
+ // We are asked to turn off super_read_only but original value is true,
+ // therefore return enableFunc, that can be used as defer by caller.
+ if !on && superReadOnlyEnabled {
+ resetFunc = enableFunc
+ }
+
query := "SET GLOBAL super_read_only = "
if on {
- query += "ON"
+ query += "'ON'"
} else {
- query += "OFF"
+ query += "'OFF'"
}
- return mysqld.ExecuteSuperQuery(context.TODO(), query)
+ if err := mysqld.ExecuteSuperQuery(context.Background(), query); err != nil {
+ return nil, err
+ }
+
+ return resetFunc, nil
}
// WaitSourcePos lets replicas wait to given replication position
@@ -396,14 +452,6 @@ func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, host string, por
if replicationStopBefore {
cmds = append(cmds, conn.StopReplicationCommand())
}
- // Reset replication parameters commands makes the instance forget the source host port
- // This is required because sometimes MySQL gets stuck due to improper initialization of
- // master info structure or related failures and throws errors like
- // ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log
- // These errors can only be resolved by resetting the replication parameters, otherwise START SLAVE fails.
- // Therefore, we have elected to always reset the replication parameters whenever we try to set the source host port
- // Since there is no real overhead, but it makes this function robust enough to also handle failures like these.
- cmds = append(cmds, conn.ResetReplicationParametersCommands()...)
smc := conn.SetReplicationSourceCommand(params, host, port, int(replicationConnectRetry.Seconds()))
cmds = append(cmds, smc)
if replicationStartAfter {
@@ -664,3 +712,16 @@ func (mysqld *Mysqld) SemiSyncReplicationStatus() (bool, error) {
}
return false, nil
}
+
+// SemiSyncExtensionLoaded returns whether semi-sync plugins are loaded.
+func (mysqld *Mysqld) SemiSyncExtensionLoaded() (bool, error) {
+ qr, err := mysqld.FetchSuperQuery(context.Background(), "SELECT COUNT(*) > 0 AS plugin_loaded FROM information_schema.plugins WHERE plugin_name LIKE 'rpl_semi_sync%'")
+ if err != nil {
+ return false, err
+ }
+ pluginPresent, err := qr.Rows[0][0].ToBool()
+ if err != nil {
+ return false, err
+ }
+ return pluginPresent, nil
+}
diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go
index 9c7ed553a25..273cbca0f2c 100644
--- a/go/vt/mysqlctl/schema.go
+++ b/go/vt/mysqlctl/schema.go
@@ -24,6 +24,7 @@ import (
"strings"
"sync"
+ "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/vterrors"
@@ -40,6 +41,14 @@ import (
var autoIncr = regexp.MustCompile(` AUTO_INCREMENT=\d+`)
+type EmptyColumnsErr struct {
+ dbName, tableName, query string
+}
+
+func (e EmptyColumnsErr) Error() string {
+ return fmt.Sprintf("unable to get columns for table %s.%s using query %s", e.dbName, e.tableName, e.query)
+}
+
// executeSchemaCommands executes some SQL commands, using the mysql
// command line tool. It uses the dba connection parameters, with credentials.
func (mysqld *Mysqld) executeSchemaCommands(sql string) error {
@@ -109,6 +118,15 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab
fields, columns, schema, err := mysqld.collectSchema(ctx, dbName, td.Name, td.Type, request.TableSchemaOnly)
if err != nil {
+ // There's a possible race condition: it could happen that a table was dropped in between reading
+ // the list of tables (collectBasicTableData(), earlier) and the point above where we investigate
+ // the table.
+ // This is fine. We identify the situation and keep the table without any fields/columns/key information
+ sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError)
+ if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNoSuchTable {
+ return
+ }
+
allErrors.RecordError(err)
cancel()
return
@@ -121,6 +139,8 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab
}
// Get primary columns concurrently.
+ // The below runs a single query on `INFORMATION_SCHEMA` and does not interact with the actual tables.
+ // It is therefore safe to run even if some tables are dropped in the interim.
colMap := map[string][]string{}
if len(tableNames) > 0 {
wg.Add(1)
@@ -275,6 +295,10 @@ const (
GetFieldsQuery = "SELECT %s FROM %s WHERE 1 != 1"
)
+// GetColumnsList returns the column names for a given table/view, using a query generating function.
+// Returned values:
+// - selectColumns: a string of comma delimited qualified names to be used in a SELECT query. e.g. "`id`, `name`, `val`"
+// - err: error
func GetColumnsList(dbName, tableName string, exec func(string, int, bool) (*sqltypes.Result, error)) (string, error) {
var dbName2 string
if dbName == "" {
@@ -288,8 +312,8 @@ func GetColumnsList(dbName, tableName string, exec func(string, int, bool) (*sql
return "", err
}
if qr == nil || len(qr.Rows) == 0 {
- err = fmt.Errorf("unable to get columns for table %s.%s using query %s", dbName, tableName, query)
- log.Errorf("%s", fmt.Errorf("unable to get columns for table %s.%s using query %s", dbName, tableName, query))
+ err := &EmptyColumnsErr{dbName: dbName, tableName: tableName, query: query}
+ log.Error(err.Error())
return "", err
}
selectColumns := ""
diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go
index d0d131e3cee..a0ee5d7493e 100644
--- a/go/vt/mysqlctl/xtrabackupengine.go
+++ b/go/vt/mysqlctl/xtrabackupengine.go
@@ -68,6 +68,7 @@ const (
streamModeTar = "tar"
xtrabackupBinaryName = "xtrabackup"
xtrabackupEngineName = "xtrabackup"
+ xtrabackupInfoFile = "xtrabackup_info"
xbstream = "xbstream"
// closeTimeout is the timeout for closing backup files after writing.
@@ -105,17 +106,17 @@ type xtraBackupManifest struct {
}
func init() {
- for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctld", "vtctldclient", "vtexplain"} {
+ for _, cmd := range []string{"vtcombo", "vttablet", "vtbackup", "vttestserver", "vtctldclient"} {
servenv.OnParseFor(cmd, registerXtraBackupEngineFlags)
}
}
func registerXtraBackupEngineFlags(fs *pflag.FlagSet) {
- fs.StringVar(&xtrabackupEnginePath, "xtrabackup_root_path", xtrabackupEnginePath, "directory location of the xtrabackup and xbstream executables, e.g., /usr/bin")
- fs.StringVar(&xtrabackupBackupFlags, "xtrabackup_backup_flags", xtrabackupBackupFlags, "flags to pass to backup command. These should be space separated and will be added to the end of the command")
- fs.StringVar(&xtrabackupPrepareFlags, "xtrabackup_prepare_flags", xtrabackupPrepareFlags, "flags to pass to prepare command. These should be space separated and will be added to the end of the command")
- fs.StringVar(&xbstreamRestoreFlags, "xbstream_restore_flags", xbstreamRestoreFlags, "flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt")
- fs.StringVar(&xtrabackupStreamMode, "xtrabackup_stream_mode", xtrabackupStreamMode, "which mode to use if streaming, valid values are tar and xbstream")
+ fs.StringVar(&xtrabackupEnginePath, "xtrabackup_root_path", xtrabackupEnginePath, "Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin")
+ fs.StringVar(&xtrabackupBackupFlags, "xtrabackup_backup_flags", xtrabackupBackupFlags, "Flags to pass to backup command. These should be space separated and will be added to the end of the command")
+ fs.StringVar(&xtrabackupPrepareFlags, "xtrabackup_prepare_flags", xtrabackupPrepareFlags, "Flags to pass to prepare command. These should be space separated and will be added to the end of the command")
+ fs.StringVar(&xbstreamRestoreFlags, "xbstream_restore_flags", xbstreamRestoreFlags, "Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt")
+ fs.StringVar(&xtrabackupStreamMode, "xtrabackup_stream_mode", xtrabackupStreamMode, "Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0")
fs.StringVar(&xtrabackupUser, "xtrabackup_user", xtrabackupUser, "User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.")
fs.UintVar(&xtrabackupStripes, "xtrabackup_stripes", xtrabackupStripes, "If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression")
fs.UintVar(&xtrabackupStripeBlockSize, "xtrabackup_stripe_block_size", xtrabackupStripeBlockSize, "Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe")
@@ -238,8 +239,14 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara
return true, nil
}
-func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, backupFileName string, numStripes int, flavor string) (replicationPosition mysql.Position, finalErr error) {
-
+func (be *XtrabackupEngine) backupFiles(
+ ctx context.Context,
+ params BackupParams,
+ bh backupstorage.BackupHandle,
+ backupFileName string,
+ numStripes int,
+ flavor string,
+) (replicationPosition mysql.Position, finalErr error) {
backupProgram := path.Join(xtrabackupEnginePath, xtrabackupBinaryName)
flagsToExec := []string{"--defaults-file=" + params.Cnf.Path,
"--backup",
@@ -247,6 +254,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams
"--slave-info",
"--user=" + xtrabackupUser,
"--target-dir=" + params.Cnf.TmpDir,
+ "--extra-lsndir=" + params.Cnf.TmpDir,
}
if xtrabackupStreamMode != "" {
flagsToExec = append(flagsToExec, "--stream="+xtrabackupStreamMode)
@@ -345,27 +353,14 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams
// the replication position. Note that if we don't read stderr as we go, the
// xtrabackup process gets blocked when the write buffer fills up.
stderrBuilder := &strings.Builder{}
- posBuilder := &strings.Builder{}
stderrDone := make(chan struct{})
go func() {
defer close(stderrDone)
scanner := bufio.NewScanner(backupErr)
- capture := false
for scanner.Scan() {
line := scanner.Text()
params.Logger.Infof("xtrabackup stderr: %s", line)
-
- // Wait until we see the first line of the binlog position.
- // Then capture all subsequent lines. We need multiple lines since
- // the value we're looking for has newlines in it.
- if !capture {
- if !strings.Contains(line, "MySQL binlog position") {
- continue
- }
- capture = true
- }
- fmt.Fprintln(posBuilder, line)
}
if err := scanner.Err(); err != nil {
params.Logger.Errorf("error reading from xtrabackup stderr: %v", err)
@@ -409,8 +404,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams
return replicationPosition, vterrors.Wrap(err, fmt.Sprintf("xtrabackup failed with error. Output=%s", sterrOutput))
}
- posOutput := posBuilder.String()
- replicationPosition, rerr := findReplicationPosition(posOutput, flavor, params.Logger)
+ replicationPosition, rerr := findReplicationPositionFromXtrabackupInfo(params.Cnf.TmpDir, flavor, params.Logger)
if rerr != nil {
return replicationPosition, vterrors.Wrap(rerr, "backup failed trying to find replication position")
}
@@ -451,7 +445,6 @@ func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestorePa
func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, bh backupstorage.BackupHandle, bm xtraBackupManifest, logger logutil.Logger) error {
// first download the file into a tmp dir
// and extract all the files
-
tempDir := fmt.Sprintf("%v/%v", cnf.TmpDir, time.Now().UTC().Format("xtrabackup-2006-01-02.150405"))
// create tempDir
if err := os.MkdirAll(tempDir, os.ModePerm); err != nil {
@@ -465,6 +458,16 @@ func (be *XtrabackupEngine) restoreFromBackup(ctx context.Context, cnf *Mycnf, b
}
}(tempDir, logger)
+ // For optimization, we are replacing pargzip with pgzip, so newBuiltinDecompressor doesn't have to compare and print a warning for every file.
+ // Since newBuiltinDecompressor is a helper method and does not hold any state, it was hard to do it in that method itself.
+ if bm.CompressionEngine == PargzipCompressor {
+ logger.Warningf(`engine "pargzip" doesn't support decompression, using "pgzip" instead`)
+ bm.CompressionEngine = PgzipCompressor
+ defer func() {
+ bm.CompressionEngine = PargzipCompressor
+ }()
+ }
+
if err := be.extractFiles(ctx, logger, bh, bm, tempDir); err != nil {
logger.Errorf("error extracting backup files: %v", err)
return err
@@ -685,6 +688,22 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log
return nil
}
+func findReplicationPositionFromXtrabackupInfo(directory, flavor string, logger logutil.Logger) (mysql.Position, error) {
+ f, err := os.Open(path.Join(directory, xtrabackupInfoFile))
+ if err != nil {
+ return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT,
+ "couldn't open %q to read GTID position", path.Join(directory, xtrabackupInfoFile))
+ }
+ defer f.Close()
+
+ contents, err := io.ReadAll(f)
+ if err != nil {
+ return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't read GTID position from %q", f.Name())
+ }
+
+ return findReplicationPosition(string(contents), flavor, logger)
+}
+
var xtrabackupReplicationPositionRegexp = regexp.MustCompile(`GTID of the last change '([^']*)'`)
func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql.Position, error) {
@@ -710,23 +729,6 @@ func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql
return replicationPosition, nil
}
-// scanLinesToLogger scans full lines from the given Reader and sends them to
-// the given Logger until EOF.
-func scanLinesToLogger(prefix string, reader io.Reader, logger logutil.Logger, doneFunc func()) {
- defer doneFunc()
-
- scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- line := scanner.Text()
- logger.Infof("%s: %s", prefix, line)
- }
- if err := scanner.Err(); err != nil {
- // This is usually run in a background goroutine, so there's no point
- // returning an error. Just log it.
- logger.Warningf("error scanning lines from %s: %v", prefix, err)
- }
-}
-
func stripeFileName(baseFileName string, index int) string {
return fmt.Sprintf("%s-%03d", baseFileName, index)
}
@@ -890,6 +892,13 @@ func (be *XtrabackupEngine) ShouldDrainForBackup() bool {
return false
}
+// ShouldStartMySQLAfterRestore signifies if this backup engine needs to restart MySQL once the restore is completed.
+func (be *XtrabackupEngine) ShouldStartMySQLAfterRestore() bool {
+ return true
+}
+
+func (be *XtrabackupEngine) Name() string { return xtrabackupEngineName }
+
func init() {
BackupRestoreEngineMap[xtrabackupEngineName] = &XtrabackupEngine{}
}
diff --git a/go/vt/mysqlctl/xtrabackupengine_test.go b/go/vt/mysqlctl/xtrabackupengine_test.go
index 26e53c6c949..7b1fdf86797 100644
--- a/go/vt/mysqlctl/xtrabackupengine_test.go
+++ b/go/vt/mysqlctl/xtrabackupengine_test.go
@@ -20,8 +20,12 @@ import (
"bytes"
"io"
"math/rand"
+ "os"
+ "path"
"testing"
+ "github.com/stretchr/testify/assert"
+
"vitess.io/vitess/go/vt/logutil"
)
@@ -51,26 +55,48 @@ func TestFindReplicationPosition(t *testing.T) {
}
}
-func TestFindReplicationPositionNoMatch(t *testing.T) {
+func TestFindReplicationPositionFromXtrabackupInfo(t *testing.T) {
+ input := `tool_version = 8.0.35-30
+ binlog_pos = filename 'vt-0476396352-bin.000005', position '310088991', GTID of the last change '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3,
+ 1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3,
+ 47b59de1-b368-11e9-b48b-624401d35560:1-152981,
+ 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3,
+ 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169,
+ b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262'
+ format = xbstream
+ `
+ want := "145e508e-ae54-11e9-8ce6-46824dd1815e:1-3,1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3,47b59de1-b368-11e9-b48b-624401d35560:1-152981,557def0a-b368-11e9-84ed-f6fffd91cc57:1-3,599ef589-ae55-11e9-9688-ca1f44501925:1-14857169,b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262"
+
+ tmp, err := os.MkdirTemp(t.TempDir(), "test")
+ assert.NoError(t, err)
+
+ f, err := os.Create(path.Join(tmp, xtrabackupInfoFile))
+ assert.NoError(t, err)
+ _, err = f.WriteString(input)
+ assert.NoError(t, err)
+ assert.NoError(t, f.Close())
+
+ pos, err := findReplicationPositionFromXtrabackupInfo(tmp, "MySQL56", logutil.NewConsoleLogger())
+ assert.NoError(t, err)
+ assert.Equal(t, want, pos.String())
+}
+
+func TestFindReplicationPositionNoMatchFromXtrabackupInfo(t *testing.T) {
// Make sure failure to find a match triggers an error.
input := `nothing`
- _, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger())
- if err == nil {
- t.Fatalf("expected error from findReplicationPosition but got nil")
- }
+ _, err := findReplicationPositionFromXtrabackupInfo(input, "MySQL56", logutil.NewConsoleLogger())
+ assert.Error(t, err)
}
-func TestFindReplicationPositionEmptyMatch(t *testing.T) {
+func TestFindReplicationPositionEmptyMatchFromXtrabackupInfo(t *testing.T) {
// Make sure failure to find a match triggers an error.
input := `GTID of the last change '
'`
- _, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger())
- if err == nil {
- t.Fatalf("expected error from findReplicationPosition but got nil")
- }
+ _, err := findReplicationPositionFromXtrabackupInfo(input, "MySQL56", logutil.NewConsoleLogger())
+ assert.Error(t, err)
}
func TestStripeRoundTrip(t *testing.T) {
diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go
index 22d61cfdc5f..075ef39aec2 100644
--- a/go/vt/proto/binlogdata/binlogdata.pb.go
+++ b/go/vt/proto/binlogdata/binlogdata.pb.go
@@ -234,6 +234,10 @@ const (
VEventType_VERSION VEventType = 17
VEventType_LASTPK VEventType = 18
VEventType_SAVEPOINT VEventType = 19
+ // COPY_COMPLETED is sent when VTGate's VStream copy operation is done.
+ // If a client experiences some disruptions before receiving the event,
+ // the client should restart the copy operation.
+ VEventType_COPY_COMPLETED VEventType = 20
)
// Enum value maps for VEventType.
@@ -259,28 +263,30 @@ var (
17: "VERSION",
18: "LASTPK",
19: "SAVEPOINT",
+ 20: "COPY_COMPLETED",
}
VEventType_value = map[string]int32{
- "UNKNOWN": 0,
- "GTID": 1,
- "BEGIN": 2,
- "COMMIT": 3,
- "ROLLBACK": 4,
- "DDL": 5,
- "INSERT": 6,
- "REPLACE": 7,
- "UPDATE": 8,
- "DELETE": 9,
- "SET": 10,
- "OTHER": 11,
- "ROW": 12,
- "FIELD": 13,
- "HEARTBEAT": 14,
- "VGTID": 15,
- "JOURNAL": 16,
- "VERSION": 17,
- "LASTPK": 18,
- "SAVEPOINT": 19,
+ "UNKNOWN": 0,
+ "GTID": 1,
+ "BEGIN": 2,
+ "COMMIT": 3,
+ "ROLLBACK": 4,
+ "DDL": 5,
+ "INSERT": 6,
+ "REPLACE": 7,
+ "UPDATE": 8,
+ "DELETE": 9,
+ "SET": 10,
+ "OTHER": 11,
+ "ROW": 12,
+ "FIELD": 13,
+ "HEARTBEAT": 14,
+ "VGTID": 15,
+ "JOURNAL": 16,
+ "VERSION": 17,
+ "LASTPK": 18,
+ "SAVEPOINT": 19,
+ "COPY_COMPLETED": 20,
}
)
@@ -3031,7 +3037,7 @@ var file_binlogdata_proto_rawDesc = []byte{
0x4c, 0x10, 0x05, 0x2a, 0x34, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79,
0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x2a, 0xf9, 0x01, 0x0a, 0x0a, 0x56, 0x45,
+ 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x2a, 0x8d, 0x02, 0x0a, 0x0a, 0x56, 0x45,
0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12,
0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f,
@@ -3047,13 +3053,14 @@ var file_binlogdata_proto_rawDesc = []byte{
0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a,
0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41,
0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, 0x50, 0x4f,
- 0x49, 0x4e, 0x54, 0x10, 0x13, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53,
- 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29,
- 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65,
- 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62,
- 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, 0x43, 0x4f,
+ 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41,
+ 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x44, 0x53,
+ 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f,
+ 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go
index 67328389a72..8b4bd0a49ef 100644
--- a/go/vt/proto/query/query.pb.go
+++ b/go/vt/proto/query/query.pb.go
@@ -1178,6 +1178,12 @@ type ExecuteOptions struct {
// if the user has created temp tables, Vitess will not reuse plans created for this session in other sessions.
// The current session can still use other sessions cached plans.
HasCreatedTempTables bool `protobuf:"varint,12,opt,name=has_created_temp_tables,json=hasCreatedTempTables,proto3" json:"has_created_temp_tables,omitempty"`
+ // WorkloadName specifies the name of the workload as indicated in query directives. This is used for instrumentation
+ // in metrics and tracing spans.
+ WorkloadName string `protobuf:"bytes,15,opt,name=WorkloadName,proto3" json:"WorkloadName,omitempty"`
+ // priority specifies the priority of the query, between 0 and 100. This is leveraged by the transaction
+ // throttler to determine whether, under resource contention, a query should or should not be throttled.
+ Priority string `protobuf:"bytes,16,opt,name=priority,proto3" json:"priority,omitempty"`
}
func (x *ExecuteOptions) Reset() {
@@ -1268,6 +1274,20 @@ func (x *ExecuteOptions) GetHasCreatedTempTables() bool {
return false
}
+func (x *ExecuteOptions) GetWorkloadName() string {
+ if x != nil {
+ return x.WorkloadName
+ }
+ return ""
+}
+
+func (x *ExecuteOptions) GetPriority() string {
+ if x != nil {
+ return x.Priority
+ }
+ return ""
+}
+
// Field describes a single column returned by a query
type Field struct {
state protoimpl.MessageState
@@ -5069,16 +5089,22 @@ type StreamHealthResponse struct {
//
// In practice, this field is set to:
// a) the last time the RPC tabletmanager.TabletExternallyReparented was
- // called on this tablet (usually done by an external failover tool e.g.
- // Orchestrator). The failover tool can call this as long as we are the
- // primary i.e. even ages after the last reparent occurred.
+ //
+ // called on this tablet (usually done by an external failover tool e.g.
+ // Orchestrator). The failover tool can call this as long as we are the
+ // primary i.e. even ages after the last reparent occurred.
+ //
// OR
// b) the last time an active reparent was executed through a vtctl command
- // (InitShardPrimary, PlannedReparentShard, EmergencyReparentShard)
+ //
+ // (InitShardPrimary, PlannedReparentShard, EmergencyReparentShard)
+ //
// OR
// c) the last time vttablet was started and it initialized its tablet type
- // as PRIMARY because it was recorded as the shard's current primary in the
- // topology (see go/vt/vttablet/tabletmanager/init_tablet.go)
+ //
+ // as PRIMARY because it was recorded as the shard's current primary in the
+ // topology (see go/vt/vttablet/tabletmanager/init_tablet.go)
+ //
// OR
// d) 0 if the vttablet was never a PRIMARY.
TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp,proto3" json:"tablet_externally_reparented_timestamp,omitempty"`
@@ -5362,7 +5388,7 @@ var file_query_proto_rawDesc = []byte{
0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69,
0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0xc5, 0x07, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x85, 0x08, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x66,
0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x71, 0x75,
0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
@@ -5394,7 +5420,11 @@ var file_query_proto_rawDesc = []byte{
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x61, 0x73, 0x5f, 0x63, 0x72,
0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x61, 0x73, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3b, 0x0a,
+ 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x22, 0x0a,
+ 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x10, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x3b, 0x0a,
0x0e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12,
0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45,
0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
diff --git a/go/vt/proto/query/query_vtproto.pb.go b/go/vt/proto/query/query_vtproto.pb.go
index 7ed228cb100..6f205943dca 100644
--- a/go/vt/proto/query/query_vtproto.pb.go
+++ b/go/vt/proto/query/query_vtproto.pb.go
@@ -377,6 +377,22 @@ func (m *ExecuteOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.Priority) > 0 {
+ i -= len(m.Priority)
+ copy(dAtA[i:], m.Priority)
+ i = encodeVarint(dAtA, i, uint64(len(m.Priority)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.WorkloadName) > 0 {
+ i -= len(m.WorkloadName)
+ copy(dAtA[i:], m.WorkloadName)
+ i = encodeVarint(dAtA, i, uint64(len(m.WorkloadName)))
+ i--
+ dAtA[i] = 0x7a
+ }
if m.HasCreatedTempTables {
i--
if m.HasCreatedTempTables {
@@ -4275,6 +4291,14 @@ func (m *ExecuteOptions) SizeVT() (n int) {
if m.HasCreatedTempTables {
n += 2
}
+ l = len(m.WorkloadName)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Priority)
+ if l > 0 {
+ n += 2 + l + sov(uint64(l))
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -6836,6 +6860,70 @@ func (m *ExecuteOptions) UnmarshalVT(dAtA []byte) error {
}
}
m.HasCreatedTempTables = bool(v != 0)
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WorkloadName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.WorkloadName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Priority = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go
index 7fc537b9cca..5bb14752f4f 100644
--- a/go/vt/proto/replicationdata/replicationdata.pb.go
+++ b/go/vt/proto/replicationdata/replicationdata.pb.go
@@ -92,6 +92,8 @@ type Status struct {
unknownFields protoimpl.UnknownFields
Position string `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"`
+ IoThreadRunning bool `protobuf:"varint,2,opt,name=io_thread_running,json=ioThreadRunning,proto3" json:"io_thread_running,omitempty"`
+ SqlThreadRunning bool `protobuf:"varint,3,opt,name=sql_thread_running,json=sqlThreadRunning,proto3" json:"sql_thread_running,omitempty"`
ReplicationLagSeconds uint32 `protobuf:"varint,4,opt,name=replication_lag_seconds,json=replicationLagSeconds,proto3" json:"replication_lag_seconds,omitempty"`
SourceHost string `protobuf:"bytes,5,opt,name=source_host,json=sourceHost,proto3" json:"source_host,omitempty"`
SourcePort int32 `protobuf:"varint,6,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"`
@@ -114,6 +116,7 @@ type Status struct {
HasReplicationFilters bool `protobuf:"varint,22,opt,name=has_replication_filters,json=hasReplicationFilters,proto3" json:"has_replication_filters,omitempty"`
SslAllowed bool `protobuf:"varint,23,opt,name=ssl_allowed,json=sslAllowed,proto3" json:"ssl_allowed,omitempty"`
ReplicationLagUnknown bool `protobuf:"varint,24,opt,name=replication_lag_unknown,json=replicationLagUnknown,proto3" json:"replication_lag_unknown,omitempty"`
+ BackupRunning bool `protobuf:"varint,25,opt,name=backup_running,json=backupRunning,proto3" json:"backup_running,omitempty"`
}
func (x *Status) Reset() {
@@ -155,6 +158,20 @@ func (x *Status) GetPosition() string {
return ""
}
+func (x *Status) GetIoThreadRunning() bool {
+ if x != nil {
+ return x.IoThreadRunning
+ }
+ return false
+}
+
+func (x *Status) GetSqlThreadRunning() bool {
+ if x != nil {
+ return x.SqlThreadRunning
+ }
+ return false
+}
+
func (x *Status) GetReplicationLagSeconds() uint32 {
if x != nil {
return x.ReplicationLagSeconds
@@ -302,6 +319,13 @@ func (x *Status) GetReplicationLagUnknown() bool {
return false
}
+func (x *Status) GetBackupRunning() bool {
+ if x != nil {
+ return x.BackupRunning
+ }
+ return false
+}
+
// StopReplicationStatus represents the replication status before calling StopReplication, and the replication status collected immediately after
// calling StopReplication.
type StopReplicationStatus struct {
@@ -620,145 +644,152 @@ var File_replicationdata_proto protoreflect.FileDescriptor
var file_replicationdata_proto_rawDesc = []byte{
0x0a, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74,
0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x22, 0x96, 0x07, 0x0a, 0x06, 0x53, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x08, 0x0a, 0x06, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c,
- 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d,
- 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67,
- 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x2c,
- 0x0a, 0x12, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6c, 0x61,
- 0x79, 0x4c, 0x6f, 0x67, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d,
- 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20,
+ 0x2a, 0x0a, 0x11, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x6f, 0x54, 0x68,
+ 0x72, 0x65, 0x61, 0x64, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+ 0x71, 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
+ 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x71, 0x6c, 0x54, 0x68, 0x72, 0x65,
+ 0x61, 0x64, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64,
+ 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x6f,
+ 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72,
+ 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50,
+ 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72,
+ 0x65, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x6c, 0x61,
+ 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x4c, 0x6f, 0x67, 0x50, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66,
+ 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x2b, 0x72,
+ 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e,
+ 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x26, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x4c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x75, 0x69,
+ 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55,
+ 0x75, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+ 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x69, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22,
+ 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x49, 0x6f, 0x45, 0x72, 0x72,
+ 0x6f, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x71, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+ 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x71, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12,
+ 0x24, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x71, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x71, 0x6c,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x35, 0x0a, 0x17, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6c,
+ 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x4c, 0x6f, 0x67,
+ 0x46, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x18, 0x12, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x0a,
+ 0x09, 0x73, 0x71, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x08, 0x73, 0x71, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x75,
+ 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36,
+ 0x0a, 0x17, 0x68, 0x61, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x15, 0x68, 0x61, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x73, 0x6c, 0x5f, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x6c,
+ 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
+ 0x67, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52,
+ 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x77, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x2f, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65,
+ 0x12, 0x2d, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x22,
+ 0x50, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d,
+ 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x5b, 0x0a, 0x2b, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x65, 0x71, 0x75,
- 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x26, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x4c, 0x6f, 0x67,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x45, 0x71, 0x75, 0x69,
- 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28,
- 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x75, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6f, 0x5f,
- 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x69, 0x6f, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x69, 0x6f, 0x5f,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x73,
- 0x74, 0x49, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x71, 0x6c, 0x5f,
- 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x71, 0x6c,
- 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x71,
- 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c,
- 0x61, 0x73, 0x74, 0x53, 0x71, 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x35, 0x0a, 0x17, 0x72,
- 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f,
- 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x72, 0x65,
- 0x6c, 0x61, 0x79, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x73, 0x65,
- 0x72, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55,
- 0x73, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x71, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79,
- 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x71, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79,
- 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x73,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x67,
- 0x74, 0x69, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, 0x73, 0x69, 0x6e, 0x67,
- 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x68, 0x61, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18,
- 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x68, 0x61, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
- 0x73, 0x73, 0x6c, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x0a, 0x73, 0x73, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x36, 0x0a,
- 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67,
- 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15,
- 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x55, 0x6e,
- 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10,
- 0x04, 0x22, 0x77, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x65,
- 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x61,
- 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70,
+ 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x75, 0x69, 0x64, 0x12, 0x46,
+ 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70,
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x22, 0x50, 0x0a, 0x0d, 0x50, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70,
- 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x5f,
- 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc3, 0x07, 0x0a,
- 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x72, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x75, 0x69, 0x64, 0x12, 0x46, 0x0a, 0x12, 0x72, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11,
- 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x67, 0x74, 0x69, 0x64,
- 0x5f, 0x70, 0x75, 0x72, 0x67, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x67,
- 0x74, 0x69, 0x64, 0x50, 0x75, 0x72, 0x67, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x74, 0x69,
- 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x67, 0x74,
- 0x69, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
- 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62,
- 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62,
- 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x6f, 0x77,
- 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x69, 0x6e,
- 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d,
- 0x6c, 0x6f, 0x67, 0x42, 0x69, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a,
- 0x13, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6c, 0x6f, 0x67, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a,
- 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69,
- 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x65, 0x6e,
- 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x65, 0x6d,
- 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x45, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63,
- 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
- 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x18,
- 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15,
- 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79,
- 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79,
- 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73,
- 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x13, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69,
- 0x6d, 0x61, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x20, 0x73,
- 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f,
- 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
- 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x57,
- 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4f, 0x41,
- 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a,
- 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x42,
- 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74,
- 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x75, 0x73, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d,
+ 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x67, 0x74, 0x69, 0x64, 0x5f, 0x70, 0x75, 0x72, 0x67, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0a, 0x67, 0x74, 0x69, 0x64, 0x50, 0x75, 0x72, 0x67, 0x65, 0x64, 0x12, 0x18,
+ 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b,
+ 0x0a, 0x09, 0x67, 0x74, 0x69, 0x64, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x67, 0x74, 0x69, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62,
+ 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x77, 0x5f, 0x69,
+ 0x6d, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c,
+ 0x6f, 0x67, 0x52, 0x6f, 0x77, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f,
+ 0x67, 0x5f, 0x62, 0x69, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x42, 0x69, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x11, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f,
+ 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x39, 0x0a,
+ 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6d, 0x69,
+ 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6d, 0x69,
+ 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x11, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65,
+ 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x73,
+ 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x43, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79,
+ 0x6e, 0x63, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79,
+ 0x6e, 0x63, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x12, 0x45, 0x0a, 0x20, 0x73, 0x65, 0x6d, 0x69, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x77, 0x61,
+ 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x73, 0x65, 0x6d, 0x69,
+ 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12,
+ 0x0a, 0x0e, 0x49, 0x4f, 0x41, 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44,
+ 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x4f, 0x4e,
+ 0x4c, 0x59, 0x10, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69,
+ 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go
index 9a7b297a4fa..ad2bde37e40 100644
--- a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go
+++ b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go
@@ -48,6 +48,18 @@ func (m *Status) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.BackupRunning {
+ i--
+ if m.BackupRunning {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc8
+ }
if m.ReplicationLagUnknown {
i--
if m.ReplicationLagUnknown {
@@ -214,6 +226,26 @@ func (m *Status) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x20
}
+ if m.SqlThreadRunning {
+ i--
+ if m.SqlThreadRunning {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.IoThreadRunning {
+ i--
+ if m.IoThreadRunning {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
if len(m.Position) > 0 {
i -= len(m.Position)
copy(dAtA[i:], m.Position)
@@ -547,6 +579,12 @@ func (m *Status) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
+ if m.IoThreadRunning {
+ n += 2
+ }
+ if m.SqlThreadRunning {
+ n += 2
+ }
if m.ReplicationLagSeconds != 0 {
n += 1 + sov(uint64(m.ReplicationLagSeconds))
}
@@ -619,6 +657,9 @@ func (m *Status) SizeVT() (n int) {
if m.ReplicationLagUnknown {
n += 3
}
+ if m.BackupRunning {
+ n += 3
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -813,6 +854,46 @@ func (m *Status) UnmarshalVT(dAtA []byte) error {
}
m.Position = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IoThreadRunning", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IoThreadRunning = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SqlThreadRunning", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SqlThreadRunning = bool(v != 0)
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ReplicationLagSeconds", wireType)
@@ -1334,6 +1415,26 @@ func (m *Status) UnmarshalVT(dAtA []byte) error {
}
}
m.ReplicationLagUnknown = bool(v != 0)
+ case 25:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BackupRunning", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.BackupRunning = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
index f32eeee32c7..1833cf66782 100644
--- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
+++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go
@@ -4495,6 +4495,8 @@ type BackupRequest struct {
Concurrency int64 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"`
+ // BackupEngine specifies if we want to use a particular backup engine for this backup request
+ BackupEngine *string `protobuf:"bytes,5,opt,name=backup_engine,json=backupEngine,proto3,oneof" json:"backup_engine,omitempty"`
}
func (x *BackupRequest) Reset() {
@@ -4543,6 +4545,13 @@ func (x *BackupRequest) GetAllowPrimary() bool {
return false
}
+func (x *BackupRequest) GetBackupEngine() string {
+ if x != nil && x.BackupEngine != nil {
+ return *x.BackupEngine
+ }
+ return ""
+}
+
type BackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -4596,6 +4605,8 @@ type RestoreFromBackupRequest struct {
unknownFields protoimpl.UnknownFields
BackupTime *vttime.Time `protobuf:"bytes,1,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"`
+ // AllowedBackupEngines, if present will filter out any backups taken with engines not included in the list
+ AllowedBackupEngines []string `protobuf:"bytes,5,rep,name=allowed_backup_engines,json=allowedBackupEngines,proto3" json:"allowed_backup_engines,omitempty"`
}
func (x *RestoreFromBackupRequest) Reset() {
@@ -4637,6 +4648,13 @@ func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time {
return nil
}
+func (x *RestoreFromBackupRequest) GetAllowedBackupEngines() []string {
+ if x != nil {
+ return x.AllowedBackupEngines
+ }
+ return nil
+}
+
type RestoreFromBackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5673,104 +5691,112 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{
0x22, 0x34, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69,
0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f,
0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f,
- 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c,
- 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x36,
- 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52,
- 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x49, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
- 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d,
- 0x65, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24,
- 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x22, 0x5c, 0x0a, 0x0c, 0x56, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f,
- 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f,
- 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x22, 0x3b, 0x0a, 0x0d, 0x56, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72,
- 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22,
- 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
- 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12,
- 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69,
- 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75,
- 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
- 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f,
- 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66,
- 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69,
- 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f,
- 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12,
- 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c,
- 0x22, 0x6a, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x0a, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70,
- 0x5f, 0x6b, 0x5f, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79,
- 0x50, 0x4b, 0x53, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x82, 0x02, 0x0a,
- 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74,
- 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61,
- 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f,
- 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52,
- 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12,
- 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27,
- 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64,
- 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65,
- 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d,
- 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45,
- 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72,
- 0x65, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
- 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66,
- 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f,
- 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
- 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x92, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x28, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x62, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a,
+ 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c,
+ 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x22, 0x7f, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72,
+ 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x34,
+ 0x0a, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x67,
+ 0x69, 0x6e, 0x65, 0x73, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46,
+ 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x5c, 0x0a, 0x0c, 0x56, 0x45, 0x78, 0x65, 0x63,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a,
+ 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x3b, 0x0a, 0x0d, 0x56, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72,
+ 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41,
+ 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69,
+ 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d,
+ 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a,
+ 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69,
+ 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76,
+ 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66,
+ 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65,
+ 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43,
+ 0x65, 0x6c, 0x6c, 0x22, 0x6a, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f,
+ 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x0a, 0x6f, 0x6e, 0x6c,
+ 0x79, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x50, 0x4b, 0x53, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62,
+ 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22,
+ 0x82, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d,
+ 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d,
+ 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
+ 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
+ 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, 0x74,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, 0x63,
+ 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61,
+ 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f,
+ 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d,
+ 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d,
+ 0x70, 0x61, 0x72, 0x65, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44,
+ 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b,
+ 0x63, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x72,
+ 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70,
+ 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6f,
+ 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74,
+ 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
var (
@@ -7212,6 +7238,7 @@ func file_tabletmanagerdata_proto_init() {
}
}
}
+ file_tabletmanagerdata_proto_msgTypes[92].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go
index 839d519ebc9..986fbaf4ee8 100644
--- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go
+++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go
@@ -4001,6 +4001,13 @@ func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.BackupEngine != nil {
+ i -= len(*m.BackupEngine)
+ copy(dAtA[i:], *m.BackupEngine)
+ i = encodeVarint(dAtA, i, uint64(len(*m.BackupEngine)))
+ i--
+ dAtA[i] = 0x2a
+ }
if m.AllowPrimary {
i--
if m.AllowPrimary {
@@ -4092,6 +4099,15 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.AllowedBackupEngines) > 0 {
+ for iNdEx := len(m.AllowedBackupEngines) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AllowedBackupEngines[iNdEx])
+ copy(dAtA[i:], m.AllowedBackupEngines[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.AllowedBackupEngines[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
if m.BackupTime != nil {
size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
@@ -6198,6 +6214,10 @@ func (m *BackupRequest) SizeVT() (n int) {
if m.AllowPrimary {
n += 2
}
+ if m.BackupEngine != nil {
+ l = len(*m.BackupEngine)
+ n += 1 + l + sov(uint64(l))
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -6230,6 +6250,12 @@ func (m *RestoreFromBackupRequest) SizeVT() (n int) {
l = m.BackupTime.SizeVT()
n += 1 + l + sov(uint64(l))
}
+ if len(m.AllowedBackupEngines) > 0 {
+ for _, s := range m.AllowedBackupEngines {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -14671,6 +14697,39 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error {
}
}
m.AllowPrimary = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BackupEngine", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.BackupEngine = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -14845,6 +14904,38 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowedBackupEngines", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AllowedBackupEngines = append(m.AllowedBackupEngines, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go
index a9797c58282..99cf8dfe48f 100644
--- a/go/vt/proto/topodata/topodata.pb.go
+++ b/go/vt/proto/topodata/topodata.pb.go
@@ -402,7 +402,6 @@ type Tablet struct {
// about which tablet should be the primary, such as via Vitess
// replication-management commands like PlannedReparentShard,
// EmergencyReparentShard, and TabletExternallyReparented.
- //
PrimaryTermStartTime *vttime.Time `protobuf:"bytes,14,opt,name=primary_term_start_time,json=primaryTermStartTime,proto3" json:"primary_term_start_time,omitempty"`
// db_server_version represents the database version used by the tablet.
DbServerVersion string `protobuf:"bytes,15,opt,name=db_server_version,json=dbServerVersion,proto3" json:"db_server_version,omitempty"`
diff --git a/go/vt/proto/vtadmin/vtadmin.pb.go b/go/vt/proto/vtadmin/vtadmin.pb.go
index c9f075b54c9..961635f1d89 100644
--- a/go/vt/proto/vtadmin/vtadmin.pb.go
+++ b/go/vt/proto/vtadmin/vtadmin.pb.go
@@ -2081,6 +2081,61 @@ func (x *GetClustersResponse) GetClusters() []*Cluster {
return nil
}
+type GetFullStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Alias *topodata.TabletAlias `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"`
+}
+
+func (x *GetFullStatusRequest) Reset() {
+ *x = GetFullStatusRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetFullStatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetFullStatusRequest) ProtoMessage() {}
+
+func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead.
+func (*GetFullStatusRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *GetFullStatusRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *GetFullStatusRequest) GetAlias() *topodata.TabletAlias {
+ if x != nil {
+ return x.Alias
+ }
+ return nil
+}
+
type GetGatesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2092,7 +2147,7 @@ type GetGatesRequest struct {
func (x *GetGatesRequest) Reset() {
*x = GetGatesRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[33]
+ mi := &file_vtadmin_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2105,7 +2160,7 @@ func (x *GetGatesRequest) String() string {
func (*GetGatesRequest) ProtoMessage() {}
func (x *GetGatesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[33]
+ mi := &file_vtadmin_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2118,7 +2173,7 @@ func (x *GetGatesRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetGatesRequest.ProtoReflect.Descriptor instead.
func (*GetGatesRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{33}
+ return file_vtadmin_proto_rawDescGZIP(), []int{34}
}
func (x *GetGatesRequest) GetClusterIds() []string {
@@ -2139,7 +2194,7 @@ type GetGatesResponse struct {
func (x *GetGatesResponse) Reset() {
*x = GetGatesResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[34]
+ mi := &file_vtadmin_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2152,7 +2207,7 @@ func (x *GetGatesResponse) String() string {
func (*GetGatesResponse) ProtoMessage() {}
func (x *GetGatesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[34]
+ mi := &file_vtadmin_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2165,7 +2220,7 @@ func (x *GetGatesResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetGatesResponse.ProtoReflect.Descriptor instead.
func (*GetGatesResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{34}
+ return file_vtadmin_proto_rawDescGZIP(), []int{35}
}
func (x *GetGatesResponse) GetGates() []*VTGate {
@@ -2187,7 +2242,7 @@ type GetKeyspaceRequest struct {
func (x *GetKeyspaceRequest) Reset() {
*x = GetKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[35]
+ mi := &file_vtadmin_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2200,7 +2255,7 @@ func (x *GetKeyspaceRequest) String() string {
func (*GetKeyspaceRequest) ProtoMessage() {}
func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[35]
+ mi := &file_vtadmin_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2213,7 +2268,7 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{35}
+ return file_vtadmin_proto_rawDescGZIP(), []int{36}
}
func (x *GetKeyspaceRequest) GetClusterId() string {
@@ -2241,7 +2296,7 @@ type GetKeyspacesRequest struct {
func (x *GetKeyspacesRequest) Reset() {
*x = GetKeyspacesRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[36]
+ mi := &file_vtadmin_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2254,7 +2309,7 @@ func (x *GetKeyspacesRequest) String() string {
func (*GetKeyspacesRequest) ProtoMessage() {}
func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[36]
+ mi := &file_vtadmin_proto_msgTypes[37]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2267,7 +2322,7 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead.
func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{36}
+ return file_vtadmin_proto_rawDescGZIP(), []int{37}
}
func (x *GetKeyspacesRequest) GetClusterIds() []string {
@@ -2288,7 +2343,7 @@ type GetKeyspacesResponse struct {
func (x *GetKeyspacesResponse) Reset() {
*x = GetKeyspacesResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[37]
+ mi := &file_vtadmin_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2301,7 +2356,7 @@ func (x *GetKeyspacesResponse) String() string {
func (*GetKeyspacesResponse) ProtoMessage() {}
func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[37]
+ mi := &file_vtadmin_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2314,7 +2369,7 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead.
func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{37}
+ return file_vtadmin_proto_rawDescGZIP(), []int{38}
}
func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace {
@@ -2338,7 +2393,7 @@ type GetSchemaRequest struct {
func (x *GetSchemaRequest) Reset() {
*x = GetSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[38]
+ mi := &file_vtadmin_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2351,7 +2406,7 @@ func (x *GetSchemaRequest) String() string {
func (*GetSchemaRequest) ProtoMessage() {}
func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[38]
+ mi := &file_vtadmin_proto_msgTypes[39]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2364,7 +2419,7 @@ func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{38}
+ return file_vtadmin_proto_rawDescGZIP(), []int{39}
}
func (x *GetSchemaRequest) GetClusterId() string {
@@ -2407,7 +2462,7 @@ type GetSchemasRequest struct {
func (x *GetSchemasRequest) Reset() {
*x = GetSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[39]
+ mi := &file_vtadmin_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2420,7 +2475,7 @@ func (x *GetSchemasRequest) String() string {
func (*GetSchemasRequest) ProtoMessage() {}
func (x *GetSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[39]
+ mi := &file_vtadmin_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2433,7 +2488,7 @@ func (x *GetSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{39}
+ return file_vtadmin_proto_rawDescGZIP(), []int{40}
}
func (x *GetSchemasRequest) GetClusterIds() []string {
@@ -2461,7 +2516,7 @@ type GetSchemasResponse struct {
func (x *GetSchemasResponse) Reset() {
*x = GetSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[40]
+ mi := &file_vtadmin_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2474,7 +2529,7 @@ func (x *GetSchemasResponse) String() string {
func (*GetSchemasResponse) ProtoMessage() {}
func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[40]
+ mi := &file_vtadmin_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2487,7 +2542,7 @@ func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{40}
+ return file_vtadmin_proto_rawDescGZIP(), []int{41}
}
func (x *GetSchemasResponse) GetSchemas() []*Schema {
@@ -2517,7 +2572,7 @@ type GetShardReplicationPositionsRequest struct {
func (x *GetShardReplicationPositionsRequest) Reset() {
*x = GetShardReplicationPositionsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[41]
+ mi := &file_vtadmin_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2530,7 +2585,7 @@ func (x *GetShardReplicationPositionsRequest) String() string {
func (*GetShardReplicationPositionsRequest) ProtoMessage() {}
func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[41]
+ mi := &file_vtadmin_proto_msgTypes[42]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2543,7 +2598,7 @@ func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Messag
// Deprecated: Use GetShardReplicationPositionsRequest.ProtoReflect.Descriptor instead.
func (*GetShardReplicationPositionsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{41}
+ return file_vtadmin_proto_rawDescGZIP(), []int{42}
}
func (x *GetShardReplicationPositionsRequest) GetClusterIds() []string {
@@ -2578,7 +2633,7 @@ type GetShardReplicationPositionsResponse struct {
func (x *GetShardReplicationPositionsResponse) Reset() {
*x = GetShardReplicationPositionsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[42]
+ mi := &file_vtadmin_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2591,7 +2646,7 @@ func (x *GetShardReplicationPositionsResponse) String() string {
func (*GetShardReplicationPositionsResponse) ProtoMessage() {}
func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[42]
+ mi := &file_vtadmin_proto_msgTypes[43]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2604,7 +2659,7 @@ func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Messa
// Deprecated: Use GetShardReplicationPositionsResponse.ProtoReflect.Descriptor instead.
func (*GetShardReplicationPositionsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{42}
+ return file_vtadmin_proto_rawDescGZIP(), []int{43}
}
func (x *GetShardReplicationPositionsResponse) GetReplicationPositions() []*ClusterShardReplicationPosition {
@@ -2626,7 +2681,7 @@ type GetSrvVSchemaRequest struct {
func (x *GetSrvVSchemaRequest) Reset() {
*x = GetSrvVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[43]
+ mi := &file_vtadmin_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2639,7 +2694,7 @@ func (x *GetSrvVSchemaRequest) String() string {
func (*GetSrvVSchemaRequest) ProtoMessage() {}
func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[43]
+ mi := &file_vtadmin_proto_msgTypes[44]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2652,7 +2707,7 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{43}
+ return file_vtadmin_proto_rawDescGZIP(), []int{44}
}
func (x *GetSrvVSchemaRequest) GetClusterId() string {
@@ -2681,7 +2736,7 @@ type GetSrvVSchemasRequest struct {
func (x *GetSrvVSchemasRequest) Reset() {
*x = GetSrvVSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[44]
+ mi := &file_vtadmin_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2694,7 +2749,7 @@ func (x *GetSrvVSchemasRequest) String() string {
func (*GetSrvVSchemasRequest) ProtoMessage() {}
func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[44]
+ mi := &file_vtadmin_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2707,7 +2762,7 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{44}
+ return file_vtadmin_proto_rawDescGZIP(), []int{45}
}
func (x *GetSrvVSchemasRequest) GetClusterIds() []string {
@@ -2735,7 +2790,7 @@ type GetSrvVSchemasResponse struct {
func (x *GetSrvVSchemasResponse) Reset() {
*x = GetSrvVSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[45]
+ mi := &file_vtadmin_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2748,7 +2803,7 @@ func (x *GetSrvVSchemasResponse) String() string {
func (*GetSrvVSchemasResponse) ProtoMessage() {}
func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[45]
+ mi := &file_vtadmin_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2761,7 +2816,7 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{45}
+ return file_vtadmin_proto_rawDescGZIP(), []int{46}
}
func (x *GetSrvVSchemasResponse) GetSrvVSchemas() []*SrvVSchema {
@@ -2783,7 +2838,7 @@ type GetSchemaTableSizeOptions struct {
func (x *GetSchemaTableSizeOptions) Reset() {
*x = GetSchemaTableSizeOptions{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[46]
+ mi := &file_vtadmin_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2796,7 +2851,7 @@ func (x *GetSchemaTableSizeOptions) String() string {
func (*GetSchemaTableSizeOptions) ProtoMessage() {}
func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[46]
+ mi := &file_vtadmin_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2809,7 +2864,7 @@ func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSchemaTableSizeOptions.ProtoReflect.Descriptor instead.
func (*GetSchemaTableSizeOptions) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{46}
+ return file_vtadmin_proto_rawDescGZIP(), []int{47}
}
func (x *GetSchemaTableSizeOptions) GetAggregateSizes() bool {
@@ -2842,7 +2897,7 @@ type GetTabletRequest struct {
func (x *GetTabletRequest) Reset() {
*x = GetTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[47]
+ mi := &file_vtadmin_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2855,7 +2910,7 @@ func (x *GetTabletRequest) String() string {
func (*GetTabletRequest) ProtoMessage() {}
func (x *GetTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[47]
+ mi := &file_vtadmin_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2868,7 +2923,7 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead.
func (*GetTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{47}
+ return file_vtadmin_proto_rawDescGZIP(), []int{48}
}
func (x *GetTabletRequest) GetAlias() *topodata.TabletAlias {
@@ -2896,7 +2951,7 @@ type GetTabletsRequest struct {
func (x *GetTabletsRequest) Reset() {
*x = GetTabletsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[48]
+ mi := &file_vtadmin_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2909,7 +2964,7 @@ func (x *GetTabletsRequest) String() string {
func (*GetTabletsRequest) ProtoMessage() {}
func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[48]
+ mi := &file_vtadmin_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2922,7 +2977,7 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead.
func (*GetTabletsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{48}
+ return file_vtadmin_proto_rawDescGZIP(), []int{49}
}
func (x *GetTabletsRequest) GetClusterIds() []string {
@@ -2943,7 +2998,7 @@ type GetTabletsResponse struct {
func (x *GetTabletsResponse) Reset() {
*x = GetTabletsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[49]
+ mi := &file_vtadmin_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2956,7 +3011,7 @@ func (x *GetTabletsResponse) String() string {
func (*GetTabletsResponse) ProtoMessage() {}
func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[49]
+ mi := &file_vtadmin_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2969,7 +3024,7 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead.
func (*GetTabletsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{49}
+ return file_vtadmin_proto_rawDescGZIP(), []int{50}
}
func (x *GetTabletsResponse) GetTablets() []*Tablet {
@@ -2979,6 +3034,61 @@ func (x *GetTabletsResponse) GetTablets() []*Tablet {
return nil
}
+type GetTopologyPathRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *GetTopologyPathRequest) Reset() {
+ *x = GetTopologyPathRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopologyPathRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopologyPathRequest) ProtoMessage() {}
+
+func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[51]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead.
+func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{51}
+}
+
+func (x *GetTopologyPathRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *GetTopologyPathRequest) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
type GetVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2991,7 +3101,7 @@ type GetVSchemaRequest struct {
func (x *GetVSchemaRequest) Reset() {
*x = GetVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[50]
+ mi := &file_vtadmin_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3004,7 +3114,7 @@ func (x *GetVSchemaRequest) String() string {
func (*GetVSchemaRequest) ProtoMessage() {}
func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[50]
+ mi := &file_vtadmin_proto_msgTypes[52]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3017,7 +3127,7 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{50}
+ return file_vtadmin_proto_rawDescGZIP(), []int{52}
}
func (x *GetVSchemaRequest) GetClusterId() string {
@@ -3045,7 +3155,7 @@ type GetVSchemasRequest struct {
func (x *GetVSchemasRequest) Reset() {
*x = GetVSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[51]
+ mi := &file_vtadmin_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3058,7 +3168,7 @@ func (x *GetVSchemasRequest) String() string {
func (*GetVSchemasRequest) ProtoMessage() {}
func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[51]
+ mi := &file_vtadmin_proto_msgTypes[53]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3071,7 +3181,7 @@ func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemasRequest.ProtoReflect.Descriptor instead.
func (*GetVSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{51}
+ return file_vtadmin_proto_rawDescGZIP(), []int{53}
}
func (x *GetVSchemasRequest) GetClusterIds() []string {
@@ -3092,7 +3202,7 @@ type GetVSchemasResponse struct {
func (x *GetVSchemasResponse) Reset() {
*x = GetVSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[52]
+ mi := &file_vtadmin_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3105,7 +3215,7 @@ func (x *GetVSchemasResponse) String() string {
func (*GetVSchemasResponse) ProtoMessage() {}
func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[52]
+ mi := &file_vtadmin_proto_msgTypes[54]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3118,7 +3228,7 @@ func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemasResponse.ProtoReflect.Descriptor instead.
func (*GetVSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{52}
+ return file_vtadmin_proto_rawDescGZIP(), []int{54}
}
func (x *GetVSchemasResponse) GetVSchemas() []*VSchema {
@@ -3139,7 +3249,7 @@ type GetVtctldsRequest struct {
func (x *GetVtctldsRequest) Reset() {
*x = GetVtctldsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[53]
+ mi := &file_vtadmin_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3152,7 +3262,7 @@ func (x *GetVtctldsRequest) String() string {
func (*GetVtctldsRequest) ProtoMessage() {}
func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[53]
+ mi := &file_vtadmin_proto_msgTypes[55]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3165,7 +3275,7 @@ func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVtctldsRequest.ProtoReflect.Descriptor instead.
func (*GetVtctldsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{53}
+ return file_vtadmin_proto_rawDescGZIP(), []int{55}
}
func (x *GetVtctldsRequest) GetClusterIds() []string {
@@ -3186,7 +3296,7 @@ type GetVtctldsResponse struct {
func (x *GetVtctldsResponse) Reset() {
*x = GetVtctldsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[54]
+ mi := &file_vtadmin_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3199,7 +3309,7 @@ func (x *GetVtctldsResponse) String() string {
func (*GetVtctldsResponse) ProtoMessage() {}
func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[54]
+ mi := &file_vtadmin_proto_msgTypes[56]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3212,7 +3322,7 @@ func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVtctldsResponse.ProtoReflect.Descriptor instead.
func (*GetVtctldsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{54}
+ return file_vtadmin_proto_rawDescGZIP(), []int{56}
}
func (x *GetVtctldsResponse) GetVtctlds() []*Vtctld {
@@ -3236,7 +3346,7 @@ type GetWorkflowRequest struct {
func (x *GetWorkflowRequest) Reset() {
*x = GetWorkflowRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[55]
+ mi := &file_vtadmin_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3249,7 +3359,7 @@ func (x *GetWorkflowRequest) String() string {
func (*GetWorkflowRequest) ProtoMessage() {}
func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[55]
+ mi := &file_vtadmin_proto_msgTypes[57]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3262,7 +3372,7 @@ func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{55}
+ return file_vtadmin_proto_rawDescGZIP(), []int{57}
}
func (x *GetWorkflowRequest) GetClusterId() string {
@@ -3322,7 +3432,7 @@ type GetWorkflowsRequest struct {
func (x *GetWorkflowsRequest) Reset() {
*x = GetWorkflowsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[56]
+ mi := &file_vtadmin_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3335,7 +3445,7 @@ func (x *GetWorkflowsRequest) String() string {
func (*GetWorkflowsRequest) ProtoMessage() {}
func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[56]
+ mi := &file_vtadmin_proto_msgTypes[58]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3348,7 +3458,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{56}
+ return file_vtadmin_proto_rawDescGZIP(), []int{58}
}
func (x *GetWorkflowsRequest) GetClusterIds() []string {
@@ -3390,7 +3500,7 @@ type GetWorkflowsResponse struct {
func (x *GetWorkflowsResponse) Reset() {
*x = GetWorkflowsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[57]
+ mi := &file_vtadmin_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3403,7 +3513,7 @@ func (x *GetWorkflowsResponse) String() string {
func (*GetWorkflowsResponse) ProtoMessage() {}
func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[57]
+ mi := &file_vtadmin_proto_msgTypes[59]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3416,7 +3526,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead.
func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{57}
+ return file_vtadmin_proto_rawDescGZIP(), []int{59}
}
func (x *GetWorkflowsResponse) GetWorkflowsByCluster() map[string]*ClusterWorkflows {
@@ -3442,7 +3552,7 @@ type PingTabletRequest struct {
func (x *PingTabletRequest) Reset() {
*x = PingTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[58]
+ mi := &file_vtadmin_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3455,7 +3565,7 @@ func (x *PingTabletRequest) String() string {
func (*PingTabletRequest) ProtoMessage() {}
func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[58]
+ mi := &file_vtadmin_proto_msgTypes[60]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3468,7 +3578,7 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead.
func (*PingTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{58}
+ return file_vtadmin_proto_rawDescGZIP(), []int{60}
}
func (x *PingTabletRequest) GetAlias() *topodata.TabletAlias {
@@ -3497,7 +3607,7 @@ type PingTabletResponse struct {
func (x *PingTabletResponse) Reset() {
*x = PingTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[59]
+ mi := &file_vtadmin_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3510,7 +3620,7 @@ func (x *PingTabletResponse) String() string {
func (*PingTabletResponse) ProtoMessage() {}
func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[59]
+ mi := &file_vtadmin_proto_msgTypes[61]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3523,7 +3633,7 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead.
func (*PingTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{59}
+ return file_vtadmin_proto_rawDescGZIP(), []int{61}
}
func (x *PingTabletResponse) GetStatus() string {
@@ -3552,7 +3662,7 @@ type PlannedFailoverShardRequest struct {
func (x *PlannedFailoverShardRequest) Reset() {
*x = PlannedFailoverShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[60]
+ mi := &file_vtadmin_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3565,7 +3675,7 @@ func (x *PlannedFailoverShardRequest) String() string {
func (*PlannedFailoverShardRequest) ProtoMessage() {}
func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[60]
+ mi := &file_vtadmin_proto_msgTypes[62]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3578,7 +3688,7 @@ func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedFailoverShardRequest.ProtoReflect.Descriptor instead.
func (*PlannedFailoverShardRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{60}
+ return file_vtadmin_proto_rawDescGZIP(), []int{62}
}
func (x *PlannedFailoverShardRequest) GetClusterId() string {
@@ -3614,7 +3724,7 @@ type PlannedFailoverShardResponse struct {
func (x *PlannedFailoverShardResponse) Reset() {
*x = PlannedFailoverShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[61]
+ mi := &file_vtadmin_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3627,7 +3737,7 @@ func (x *PlannedFailoverShardResponse) String() string {
func (*PlannedFailoverShardResponse) ProtoMessage() {}
func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[61]
+ mi := &file_vtadmin_proto_msgTypes[63]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3640,7 +3750,7 @@ func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedFailoverShardResponse.ProtoReflect.Descriptor instead.
func (*PlannedFailoverShardResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{61}
+ return file_vtadmin_proto_rawDescGZIP(), []int{63}
}
func (x *PlannedFailoverShardResponse) GetCluster() *Cluster {
@@ -3692,7 +3802,7 @@ type RebuildKeyspaceGraphRequest struct {
func (x *RebuildKeyspaceGraphRequest) Reset() {
*x = RebuildKeyspaceGraphRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[62]
+ mi := &file_vtadmin_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3705,7 +3815,7 @@ func (x *RebuildKeyspaceGraphRequest) String() string {
func (*RebuildKeyspaceGraphRequest) ProtoMessage() {}
func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[62]
+ mi := &file_vtadmin_proto_msgTypes[64]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3718,7 +3828,7 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{62}
+ return file_vtadmin_proto_rawDescGZIP(), []int{64}
}
func (x *RebuildKeyspaceGraphRequest) GetClusterId() string {
@@ -3760,7 +3870,7 @@ type RebuildKeyspaceGraphResponse struct {
func (x *RebuildKeyspaceGraphResponse) Reset() {
*x = RebuildKeyspaceGraphResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[63]
+ mi := &file_vtadmin_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3773,7 +3883,7 @@ func (x *RebuildKeyspaceGraphResponse) String() string {
func (*RebuildKeyspaceGraphResponse) ProtoMessage() {}
func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[63]
+ mi := &file_vtadmin_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3786,7 +3896,7 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{63}
+ return file_vtadmin_proto_rawDescGZIP(), []int{65}
}
func (x *RebuildKeyspaceGraphResponse) GetStatus() string {
@@ -3808,7 +3918,7 @@ type RefreshStateRequest struct {
func (x *RefreshStateRequest) Reset() {
*x = RefreshStateRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[64]
+ mi := &file_vtadmin_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3821,7 +3931,7 @@ func (x *RefreshStateRequest) String() string {
func (*RefreshStateRequest) ProtoMessage() {}
func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[64]
+ mi := &file_vtadmin_proto_msgTypes[66]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3834,7 +3944,7 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead.
func (*RefreshStateRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{64}
+ return file_vtadmin_proto_rawDescGZIP(), []int{66}
}
func (x *RefreshStateRequest) GetAlias() *topodata.TabletAlias {
@@ -3863,7 +3973,7 @@ type RefreshStateResponse struct {
func (x *RefreshStateResponse) Reset() {
*x = RefreshStateResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[65]
+ mi := &file_vtadmin_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3876,7 +3986,7 @@ func (x *RefreshStateResponse) String() string {
func (*RefreshStateResponse) ProtoMessage() {}
func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[65]
+ mi := &file_vtadmin_proto_msgTypes[67]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3889,7 +3999,7 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead.
func (*RefreshStateResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{65}
+ return file_vtadmin_proto_rawDescGZIP(), []int{67}
}
func (x *RefreshStateResponse) GetStatus() string {
@@ -3958,7 +4068,7 @@ type ReloadSchemasRequest struct {
func (x *ReloadSchemasRequest) Reset() {
*x = ReloadSchemasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[66]
+ mi := &file_vtadmin_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3971,7 +4081,7 @@ func (x *ReloadSchemasRequest) String() string {
func (*ReloadSchemasRequest) ProtoMessage() {}
func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[66]
+ mi := &file_vtadmin_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3984,7 +4094,7 @@ func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemasRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemasRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{66}
+ return file_vtadmin_proto_rawDescGZIP(), []int{68}
}
func (x *ReloadSchemasRequest) GetKeyspaces() []string {
@@ -4058,7 +4168,7 @@ type ReloadSchemasResponse struct {
func (x *ReloadSchemasResponse) Reset() {
*x = ReloadSchemasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[67]
+ mi := &file_vtadmin_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4071,7 +4181,7 @@ func (x *ReloadSchemasResponse) String() string {
func (*ReloadSchemasResponse) ProtoMessage() {}
func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[67]
+ mi := &file_vtadmin_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4084,7 +4194,7 @@ func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemasResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69}
}
func (x *ReloadSchemasResponse) GetKeyspaceResults() []*ReloadSchemasResponse_KeyspaceResult {
@@ -4124,7 +4234,7 @@ type ReloadSchemaShardRequest struct {
func (x *ReloadSchemaShardRequest) Reset() {
*x = ReloadSchemaShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[68]
+ mi := &file_vtadmin_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4137,7 +4247,7 @@ func (x *ReloadSchemaShardRequest) String() string {
func (*ReloadSchemaShardRequest) ProtoMessage() {}
func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[68]
+ mi := &file_vtadmin_proto_msgTypes[70]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4150,7 +4260,7 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{68}
+ return file_vtadmin_proto_rawDescGZIP(), []int{70}
}
func (x *ReloadSchemaShardRequest) GetClusterId() string {
@@ -4206,7 +4316,7 @@ type ReloadSchemaShardResponse struct {
func (x *ReloadSchemaShardResponse) Reset() {
*x = ReloadSchemaShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[69]
+ mi := &file_vtadmin_proto_msgTypes[71]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4219,7 +4329,7 @@ func (x *ReloadSchemaShardResponse) String() string {
func (*ReloadSchemaShardResponse) ProtoMessage() {}
func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[69]
+ mi := &file_vtadmin_proto_msgTypes[71]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4232,7 +4342,7 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{69}
+ return file_vtadmin_proto_rawDescGZIP(), []int{71}
}
func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event {
@@ -4254,7 +4364,7 @@ type RefreshTabletReplicationSourceRequest struct {
func (x *RefreshTabletReplicationSourceRequest) Reset() {
*x = RefreshTabletReplicationSourceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[70]
+ mi := &file_vtadmin_proto_msgTypes[72]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4267,7 +4377,7 @@ func (x *RefreshTabletReplicationSourceRequest) String() string {
func (*RefreshTabletReplicationSourceRequest) ProtoMessage() {}
func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[70]
+ mi := &file_vtadmin_proto_msgTypes[72]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4280,7 +4390,7 @@ func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Mess
// Deprecated: Use RefreshTabletReplicationSourceRequest.ProtoReflect.Descriptor instead.
func (*RefreshTabletReplicationSourceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{70}
+ return file_vtadmin_proto_rawDescGZIP(), []int{72}
}
func (x *RefreshTabletReplicationSourceRequest) GetAlias() *topodata.TabletAlias {
@@ -4311,7 +4421,7 @@ type RefreshTabletReplicationSourceResponse struct {
func (x *RefreshTabletReplicationSourceResponse) Reset() {
*x = RefreshTabletReplicationSourceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[71]
+ mi := &file_vtadmin_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4324,7 +4434,7 @@ func (x *RefreshTabletReplicationSourceResponse) String() string {
func (*RefreshTabletReplicationSourceResponse) ProtoMessage() {}
func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[71]
+ mi := &file_vtadmin_proto_msgTypes[73]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4337,7 +4447,7 @@ func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Mes
// Deprecated: Use RefreshTabletReplicationSourceResponse.ProtoReflect.Descriptor instead.
func (*RefreshTabletReplicationSourceResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{71}
+ return file_vtadmin_proto_rawDescGZIP(), []int{73}
}
func (x *RefreshTabletReplicationSourceResponse) GetKeyspace() string {
@@ -4383,7 +4493,7 @@ type RemoveKeyspaceCellRequest struct {
func (x *RemoveKeyspaceCellRequest) Reset() {
*x = RemoveKeyspaceCellRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[72]
+ mi := &file_vtadmin_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4396,7 +4506,7 @@ func (x *RemoveKeyspaceCellRequest) String() string {
func (*RemoveKeyspaceCellRequest) ProtoMessage() {}
func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[72]
+ mi := &file_vtadmin_proto_msgTypes[74]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4409,7 +4519,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{72}
+ return file_vtadmin_proto_rawDescGZIP(), []int{74}
}
func (x *RemoveKeyspaceCellRequest) GetClusterId() string {
@@ -4458,7 +4568,7 @@ type RemoveKeyspaceCellResponse struct {
func (x *RemoveKeyspaceCellResponse) Reset() {
*x = RemoveKeyspaceCellResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[73]
+ mi := &file_vtadmin_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4471,7 +4581,7 @@ func (x *RemoveKeyspaceCellResponse) String() string {
func (*RemoveKeyspaceCellResponse) ProtoMessage() {}
func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[73]
+ mi := &file_vtadmin_proto_msgTypes[75]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4484,7 +4594,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{73}
+ return file_vtadmin_proto_rawDescGZIP(), []int{75}
}
func (x *RemoveKeyspaceCellResponse) GetStatus() string {
@@ -4506,7 +4616,7 @@ type RunHealthCheckRequest struct {
func (x *RunHealthCheckRequest) Reset() {
*x = RunHealthCheckRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[74]
+ mi := &file_vtadmin_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4519,7 +4629,7 @@ func (x *RunHealthCheckRequest) String() string {
func (*RunHealthCheckRequest) ProtoMessage() {}
func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[74]
+ mi := &file_vtadmin_proto_msgTypes[76]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4532,7 +4642,7 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead.
func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{74}
+ return file_vtadmin_proto_rawDescGZIP(), []int{76}
}
func (x *RunHealthCheckRequest) GetAlias() *topodata.TabletAlias {
@@ -4561,7 +4671,7 @@ type RunHealthCheckResponse struct {
func (x *RunHealthCheckResponse) Reset() {
*x = RunHealthCheckResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[75]
+ mi := &file_vtadmin_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4574,7 +4684,7 @@ func (x *RunHealthCheckResponse) String() string {
func (*RunHealthCheckResponse) ProtoMessage() {}
func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[75]
+ mi := &file_vtadmin_proto_msgTypes[77]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4587,7 +4697,7 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead.
func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{75}
+ return file_vtadmin_proto_rawDescGZIP(), []int{77}
}
func (x *RunHealthCheckResponse) GetStatus() string {
@@ -4616,7 +4726,7 @@ type SetReadOnlyRequest struct {
func (x *SetReadOnlyRequest) Reset() {
*x = SetReadOnlyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[76]
+ mi := &file_vtadmin_proto_msgTypes[78]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4629,7 +4739,7 @@ func (x *SetReadOnlyRequest) String() string {
func (*SetReadOnlyRequest) ProtoMessage() {}
func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[76]
+ mi := &file_vtadmin_proto_msgTypes[78]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4642,7 +4752,7 @@ func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadOnlyRequest.ProtoReflect.Descriptor instead.
func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{76}
+ return file_vtadmin_proto_rawDescGZIP(), []int{78}
}
func (x *SetReadOnlyRequest) GetAlias() *topodata.TabletAlias {
@@ -4668,7 +4778,7 @@ type SetReadOnlyResponse struct {
func (x *SetReadOnlyResponse) Reset() {
*x = SetReadOnlyResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[77]
+ mi := &file_vtadmin_proto_msgTypes[79]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4681,7 +4791,7 @@ func (x *SetReadOnlyResponse) String() string {
func (*SetReadOnlyResponse) ProtoMessage() {}
func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[77]
+ mi := &file_vtadmin_proto_msgTypes[79]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4694,7 +4804,7 @@ func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadOnlyResponse.ProtoReflect.Descriptor instead.
func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{77}
+ return file_vtadmin_proto_rawDescGZIP(), []int{79}
}
type SetReadWriteRequest struct {
@@ -4709,7 +4819,7 @@ type SetReadWriteRequest struct {
func (x *SetReadWriteRequest) Reset() {
*x = SetReadWriteRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[78]
+ mi := &file_vtadmin_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4722,7 +4832,7 @@ func (x *SetReadWriteRequest) String() string {
func (*SetReadWriteRequest) ProtoMessage() {}
func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[78]
+ mi := &file_vtadmin_proto_msgTypes[80]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4735,7 +4845,7 @@ func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadWriteRequest.ProtoReflect.Descriptor instead.
func (*SetReadWriteRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{78}
+ return file_vtadmin_proto_rawDescGZIP(), []int{80}
}
func (x *SetReadWriteRequest) GetAlias() *topodata.TabletAlias {
@@ -4761,7 +4871,7 @@ type SetReadWriteResponse struct {
func (x *SetReadWriteResponse) Reset() {
*x = SetReadWriteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[79]
+ mi := &file_vtadmin_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4774,7 +4884,7 @@ func (x *SetReadWriteResponse) String() string {
func (*SetReadWriteResponse) ProtoMessage() {}
func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[79]
+ mi := &file_vtadmin_proto_msgTypes[81]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4787,7 +4897,7 @@ func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetReadWriteResponse.ProtoReflect.Descriptor instead.
func (*SetReadWriteResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{79}
+ return file_vtadmin_proto_rawDescGZIP(), []int{81}
}
type StartReplicationRequest struct {
@@ -4802,7 +4912,7 @@ type StartReplicationRequest struct {
func (x *StartReplicationRequest) Reset() {
*x = StartReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[80]
+ mi := &file_vtadmin_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4815,7 +4925,7 @@ func (x *StartReplicationRequest) String() string {
func (*StartReplicationRequest) ProtoMessage() {}
func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[80]
+ mi := &file_vtadmin_proto_msgTypes[82]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4828,7 +4938,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead.
func (*StartReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{80}
+ return file_vtadmin_proto_rawDescGZIP(), []int{82}
}
func (x *StartReplicationRequest) GetAlias() *topodata.TabletAlias {
@@ -4857,7 +4967,7 @@ type StartReplicationResponse struct {
func (x *StartReplicationResponse) Reset() {
*x = StartReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[81]
+ mi := &file_vtadmin_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4870,7 +4980,7 @@ func (x *StartReplicationResponse) String() string {
func (*StartReplicationResponse) ProtoMessage() {}
func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[81]
+ mi := &file_vtadmin_proto_msgTypes[83]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4883,7 +4993,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead.
func (*StartReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{81}
+ return file_vtadmin_proto_rawDescGZIP(), []int{83}
}
func (x *StartReplicationResponse) GetStatus() string {
@@ -4912,7 +5022,7 @@ type StopReplicationRequest struct {
func (x *StopReplicationRequest) Reset() {
*x = StopReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[82]
+ mi := &file_vtadmin_proto_msgTypes[84]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4925,7 +5035,7 @@ func (x *StopReplicationRequest) String() string {
func (*StopReplicationRequest) ProtoMessage() {}
func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[82]
+ mi := &file_vtadmin_proto_msgTypes[84]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4938,7 +5048,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead.
func (*StopReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{82}
+ return file_vtadmin_proto_rawDescGZIP(), []int{84}
}
func (x *StopReplicationRequest) GetAlias() *topodata.TabletAlias {
@@ -4967,7 +5077,7 @@ type StopReplicationResponse struct {
func (x *StopReplicationResponse) Reset() {
*x = StopReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[83]
+ mi := &file_vtadmin_proto_msgTypes[85]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4980,7 +5090,7 @@ func (x *StopReplicationResponse) String() string {
func (*StopReplicationResponse) ProtoMessage() {}
func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[83]
+ mi := &file_vtadmin_proto_msgTypes[85]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4993,7 +5103,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead.
func (*StopReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{83}
+ return file_vtadmin_proto_rawDescGZIP(), []int{85}
}
func (x *StopReplicationResponse) GetStatus() string {
@@ -5024,7 +5134,7 @@ type TabletExternallyPromotedRequest struct {
func (x *TabletExternallyPromotedRequest) Reset() {
*x = TabletExternallyPromotedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[84]
+ mi := &file_vtadmin_proto_msgTypes[86]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5037,7 +5147,7 @@ func (x *TabletExternallyPromotedRequest) String() string {
func (*TabletExternallyPromotedRequest) ProtoMessage() {}
func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[84]
+ mi := &file_vtadmin_proto_msgTypes[86]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5050,7 +5160,7 @@ func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use TabletExternallyPromotedRequest.ProtoReflect.Descriptor instead.
func (*TabletExternallyPromotedRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{84}
+ return file_vtadmin_proto_rawDescGZIP(), []int{86}
}
func (x *TabletExternallyPromotedRequest) GetAlias() *topodata.TabletAlias {
@@ -5082,7 +5192,7 @@ type TabletExternallyPromotedResponse struct {
func (x *TabletExternallyPromotedResponse) Reset() {
*x = TabletExternallyPromotedResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[85]
+ mi := &file_vtadmin_proto_msgTypes[87]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5095,7 +5205,7 @@ func (x *TabletExternallyPromotedResponse) String() string {
func (*TabletExternallyPromotedResponse) ProtoMessage() {}
func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[85]
+ mi := &file_vtadmin_proto_msgTypes[87]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5108,7 +5218,7 @@ func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use TabletExternallyPromotedResponse.ProtoReflect.Descriptor instead.
func (*TabletExternallyPromotedResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{85}
+ return file_vtadmin_proto_rawDescGZIP(), []int{87}
}
func (x *TabletExternallyPromotedResponse) GetCluster() *Cluster {
@@ -5158,7 +5268,7 @@ type TabletExternallyReparentedRequest struct {
func (x *TabletExternallyReparentedRequest) Reset() {
*x = TabletExternallyReparentedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[86]
+ mi := &file_vtadmin_proto_msgTypes[88]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5171,7 +5281,7 @@ func (x *TabletExternallyReparentedRequest) String() string {
func (*TabletExternallyReparentedRequest) ProtoMessage() {}
func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[86]
+ mi := &file_vtadmin_proto_msgTypes[88]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5184,7 +5294,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead.
func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{86}
+ return file_vtadmin_proto_rawDescGZIP(), []int{88}
}
func (x *TabletExternallyReparentedRequest) GetAlias() *topodata.TabletAlias {
@@ -5201,6 +5311,61 @@ func (x *TabletExternallyReparentedRequest) GetClusterIds() []string {
return nil
}
+type ValidateRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"`
+}
+
+func (x *ValidateRequest) Reset() {
+ *x = ValidateRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[89]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateRequest) ProtoMessage() {}
+
+func (x *ValidateRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[89]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead.
+func (*ValidateRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{89}
+}
+
+func (x *ValidateRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *ValidateRequest) GetPingTablets() bool {
+ if x != nil {
+ return x.PingTablets
+ }
+ return false
+}
+
type ValidateKeyspaceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5214,7 +5379,7 @@ type ValidateKeyspaceRequest struct {
func (x *ValidateKeyspaceRequest) Reset() {
*x = ValidateKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[87]
+ mi := &file_vtadmin_proto_msgTypes[90]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5227,7 +5392,7 @@ func (x *ValidateKeyspaceRequest) String() string {
func (*ValidateKeyspaceRequest) ProtoMessage() {}
func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[87]
+ mi := &file_vtadmin_proto_msgTypes[90]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5240,7 +5405,7 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{87}
+ return file_vtadmin_proto_rawDescGZIP(), []int{90}
}
func (x *ValidateKeyspaceRequest) GetClusterId() string {
@@ -5276,7 +5441,7 @@ type ValidateSchemaKeyspaceRequest struct {
func (x *ValidateSchemaKeyspaceRequest) Reset() {
*x = ValidateSchemaKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[88]
+ mi := &file_vtadmin_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5289,7 +5454,7 @@ func (x *ValidateSchemaKeyspaceRequest) String() string {
func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {}
func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[88]
+ mi := &file_vtadmin_proto_msgTypes[91]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5302,7 +5467,7 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{88}
+ return file_vtadmin_proto_rawDescGZIP(), []int{91}
}
func (x *ValidateSchemaKeyspaceRequest) GetClusterId() string {
@@ -5319,32 +5484,34 @@ func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string {
return ""
}
-type ValidateVersionKeyspaceRequest struct {
+type ValidateShardRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
- Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"`
+ PingTablets bool `protobuf:"varint,4,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"`
}
-func (x *ValidateVersionKeyspaceRequest) Reset() {
- *x = ValidateVersionKeyspaceRequest{}
+func (x *ValidateShardRequest) Reset() {
+ *x = ValidateShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[89]
+ mi := &file_vtadmin_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ValidateVersionKeyspaceRequest) String() string {
+func (x *ValidateShardRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ValidateVersionKeyspaceRequest) ProtoMessage() {}
+func (*ValidateShardRequest) ProtoMessage() {}
-func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[89]
+func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[92]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5355,52 +5522,184 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead.
-func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{89}
+// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead.
+func (*ValidateShardRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{92}
}
-func (x *ValidateVersionKeyspaceRequest) GetClusterId() string {
+func (x *ValidateShardRequest) GetClusterId() string {
if x != nil {
return x.ClusterId
}
return ""
}
-func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string {
+func (x *ValidateShardRequest) GetKeyspace() string {
if x != nil {
return x.Keyspace
}
return ""
}
-type VTExplainRequest struct {
+func (x *ValidateShardRequest) GetShard() string {
+ if x != nil {
+ return x.Shard
+ }
+ return ""
+}
+
+func (x *ValidateShardRequest) GetPingTablets() bool {
+ if x != nil {
+ return x.PingTablets
+ }
+ return false
+}
+
+type ValidateVersionKeyspaceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
- Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
- Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
}
-func (x *VTExplainRequest) Reset() {
- *x = VTExplainRequest{}
+func (x *ValidateVersionKeyspaceRequest) Reset() {
+ *x = ValidateVersionKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[90]
+ mi := &file_vtadmin_proto_msgTypes[93]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *VTExplainRequest) String() string {
+func (x *ValidateVersionKeyspaceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*VTExplainRequest) ProtoMessage() {}
+func (*ValidateVersionKeyspaceRequest) ProtoMessage() {}
-func (x *VTExplainRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[90]
+func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[93]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead.
+func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{93}
+}
+
+func (x *ValidateVersionKeyspaceRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+type ValidateVersionShardRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"`
+}
+
+func (x *ValidateVersionShardRequest) Reset() {
+ *x = ValidateVersionShardRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[94]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateVersionShardRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateVersionShardRequest) ProtoMessage() {}
+
+func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[94]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead.
+func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) {
+ return file_vtadmin_proto_rawDescGZIP(), []int{94}
+}
+
+func (x *ValidateVersionShardRequest) GetClusterId() string {
+ if x != nil {
+ return x.ClusterId
+ }
+ return ""
+}
+
+func (x *ValidateVersionShardRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+func (x *ValidateVersionShardRequest) GetShard() string {
+ if x != nil {
+ return x.Shard
+ }
+ return ""
+}
+
+type VTExplainRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
+}
+
+func (x *VTExplainRequest) Reset() {
+ *x = VTExplainRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtadmin_proto_msgTypes[95]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VTExplainRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VTExplainRequest) ProtoMessage() {}
+
+func (x *VTExplainRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtadmin_proto_msgTypes[95]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5413,7 +5712,7 @@ func (x *VTExplainRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use VTExplainRequest.ProtoReflect.Descriptor instead.
func (*VTExplainRequest) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{90}
+ return file_vtadmin_proto_rawDescGZIP(), []int{95}
}
func (x *VTExplainRequest) GetCluster() string {
@@ -5448,7 +5747,7 @@ type VTExplainResponse struct {
func (x *VTExplainResponse) Reset() {
*x = VTExplainResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[91]
+ mi := &file_vtadmin_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5461,7 +5760,7 @@ func (x *VTExplainResponse) String() string {
func (*VTExplainResponse) ProtoMessage() {}
func (x *VTExplainResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[91]
+ mi := &file_vtadmin_proto_msgTypes[96]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5474,7 +5773,7 @@ func (x *VTExplainResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use VTExplainResponse.ProtoReflect.Descriptor instead.
func (*VTExplainResponse) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{91}
+ return file_vtadmin_proto_rawDescGZIP(), []int{96}
}
func (x *VTExplainResponse) GetResponse() string {
@@ -5496,7 +5795,7 @@ type Schema_ShardTableSize struct {
func (x *Schema_ShardTableSize) Reset() {
*x = Schema_ShardTableSize{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[95]
+ mi := &file_vtadmin_proto_msgTypes[100]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5509,7 +5808,7 @@ func (x *Schema_ShardTableSize) String() string {
func (*Schema_ShardTableSize) ProtoMessage() {}
func (x *Schema_ShardTableSize) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[95]
+ mi := &file_vtadmin_proto_msgTypes[100]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5554,7 +5853,7 @@ type Schema_TableSize struct {
func (x *Schema_TableSize) Reset() {
*x = Schema_TableSize{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[96]
+ mi := &file_vtadmin_proto_msgTypes[101]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5567,7 +5866,7 @@ func (x *Schema_TableSize) String() string {
func (*Schema_TableSize) ProtoMessage() {}
func (x *Schema_TableSize) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[96]
+ mi := &file_vtadmin_proto_msgTypes[101]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5622,7 +5921,7 @@ type ReloadSchemasResponse_KeyspaceResult struct {
func (x *ReloadSchemasResponse_KeyspaceResult) Reset() {
*x = ReloadSchemasResponse_KeyspaceResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[99]
+ mi := &file_vtadmin_proto_msgTypes[104]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5635,7 +5934,7 @@ func (x *ReloadSchemasResponse_KeyspaceResult) String() string {
func (*ReloadSchemasResponse_KeyspaceResult) ProtoMessage() {}
func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[99]
+ mi := &file_vtadmin_proto_msgTypes[104]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5648,7 +5947,7 @@ func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Messa
// Deprecated: Use ReloadSchemasResponse_KeyspaceResult.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse_KeyspaceResult) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67, 0}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69, 0}
}
func (x *ReloadSchemasResponse_KeyspaceResult) GetKeyspace() *Keyspace {
@@ -5683,7 +5982,7 @@ type ReloadSchemasResponse_ShardResult struct {
func (x *ReloadSchemasResponse_ShardResult) Reset() {
*x = ReloadSchemasResponse_ShardResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[100]
+ mi := &file_vtadmin_proto_msgTypes[105]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5696,7 +5995,7 @@ func (x *ReloadSchemasResponse_ShardResult) String() string {
func (*ReloadSchemasResponse_ShardResult) ProtoMessage() {}
func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[100]
+ mi := &file_vtadmin_proto_msgTypes[105]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5709,7 +6008,7 @@ func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message
// Deprecated: Use ReloadSchemasResponse_ShardResult.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse_ShardResult) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67, 1}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69, 1}
}
func (x *ReloadSchemasResponse_ShardResult) GetShard() *Shard {
@@ -5745,7 +6044,7 @@ type ReloadSchemasResponse_TabletResult struct {
func (x *ReloadSchemasResponse_TabletResult) Reset() {
*x = ReloadSchemasResponse_TabletResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtadmin_proto_msgTypes[101]
+ mi := &file_vtadmin_proto_msgTypes[106]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5758,7 +6057,7 @@ func (x *ReloadSchemasResponse_TabletResult) String() string {
func (*ReloadSchemasResponse_TabletResult) ProtoMessage() {}
func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message {
- mi := &file_vtadmin_proto_msgTypes[101]
+ mi := &file_vtadmin_proto_msgTypes[106]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5771,7 +6070,7 @@ func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message
// Deprecated: Use ReloadSchemasResponse_TabletResult.ProtoReflect.Descriptor instead.
func (*ReloadSchemasResponse_TabletResult) Descriptor() ([]byte, []int) {
- return file_vtadmin_proto_rawDescGZIP(), []int{67, 2}
+ return file_vtadmin_proto_rawDescGZIP(), []int{69, 2}
}
func (x *ReloadSchemasResponse_TabletResult) GetTablet() *Tablet {
@@ -6089,667 +6388,726 @@ var file_vtadmin_proto_rawDesc = []byte{
0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a,
0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x32, 0x0a, 0x0f, 0x47,
- 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
- 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22,
- 0x39, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x47,
- 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x12, 0x47, 0x65,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x36, 0x0a, 0x13, 0x47,
- 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x62, 0x0a, 0x14, 0x47,
+ 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22,
+ 0x32, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a,
- 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a,
- 0x12, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x8d,
- 0x01, 0x0a, 0x23, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x49, 0x64, 0x73, 0x22, 0x39, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x22, 0x4f,
+ 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
+ 0x36, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x85,
- 0x01, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76,
- 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
- 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c,
- 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x22, 0xb5, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12,
+ 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x23, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63,
- 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c,
- 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0d, 0x73,
- 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76,
- 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x67, 0x67,
- 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x69,
- 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x60, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x47, 0x65,
- 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73,
- 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65,
+ 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49,
+ 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
+ 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56,
0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x34,
- 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x37, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x73, 0x72, 0x76,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12,
+ 0x3b, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x6e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x60, 0x0a, 0x10,
+ 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x34,
+ 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x52, 0x07, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72,
- 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa0, 0x01, 0x0a,
- 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71,
+ 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x4b, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f,
+ 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12,
+ 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x13, 0x47, 0x65, 0x74,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22,
+ 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69,
- 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f,
- 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22,
- 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42,
- 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x77,
- 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79,
- 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57,
- 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f,
- 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x40,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e,
- 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c,
- 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12,
- 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x1b, 0x52, 0x65,
- 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61,
- 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
- 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22,
- 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65,
- 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
+ 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x52, 0x07, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa0, 0x01,
+ 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x77, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
+ 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12,
+ 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42,
+ 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69,
+ 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c,
+ 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61,
+ 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69,
+ 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a,
+ 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x1b, 0x52,
+ 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72,
+ 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+ 0x22, 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72,
+ 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5a, 0x0a,
+ 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a,
+ 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x52, 0x65,
+ 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a,
+ 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72,
+ 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63,
+ 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xad, 0x04, 0x0a, 0x15,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0f,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12,
+ 0x4f, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x12, 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5b, 0x0a,
+ 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x0c, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x27, 0x0a, 0x06, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x18,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69,
+ 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27,
+ 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65,
+ 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f,
+ 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c,
+ 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c,
+ 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x75,
+ 0x0a, 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61,
+ 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73,
+ 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22,
+ 0x9e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
+ 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65,
+ 0x22, 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16,
+ 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5c, 0x0a,
+ 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x12, 0x53,
+ 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22,
+ 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61,
+ 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a,
+ 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53,
+ 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5a, 0x0a, 0x14,
- 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e,
- 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52,
- 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x52, 0x65, 0x6c,
- 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12,
- 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d,
- 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69,
- 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xad, 0x04, 0x0a, 0x15, 0x52,
- 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x4f,
- 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12,
- 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5b, 0x0a, 0x0b,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x74, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x0c, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x27, 0x0a, 0x06, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x18, 0x52,
- 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74,
- 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a,
- 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
- 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f,
- 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x75, 0x0a,
- 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
+ 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5e, 0x0a, 0x18,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x66, 0x0a, 0x16,
+ 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c,
0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9e,
- 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66,
- 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63,
- 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22,
- 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c,
- 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
- 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5c, 0x0a, 0x16,
- 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a,
- 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x12, 0x53, 0x65,
- 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a,
- 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x15,
- 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64,
- 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05,
+ 0x72, 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c,
+ 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
+ 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64,
+ 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05,
0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a,
- 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x65,
- 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a,
- 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5e, 0x0a, 0x18, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x66, 0x0a, 0x16, 0x53,
- 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16,
- 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
- 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75,
- 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36,
- 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73,
- 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x53, 0x0a, 0x0f, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c,
+ 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22,
+ 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e,
+ 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21,
+ 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x73, 0x22, 0x5b, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5b,
- 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5a, 0x0a, 0x10, 0x56,
- 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x2f, 0x0a, 0x11, 0x56, 0x54, 0x45, 0x78, 0x70,
- 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08,
- 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf9, 0x1c, 0x0a, 0x07, 0x56, 0x54, 0x41,
- 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x6e,
+ 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5a,
+ 0x0a, 0x10, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x2f, 0x0a, 0x11, 0x56, 0x54,
+ 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa9, 0x20, 0x0a, 0x07,
+ 0x56, 0x54, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f,
- 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12,
- 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b,
- 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f,
- 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c,
- 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67,
- 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x46,
- 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x6b, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61,
+ 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46,
+ 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d,
+ 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b,
+ 0x0a, 0x0a, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47,
+ 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49,
+ 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
+ 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47,
+ 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75,
+ 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x47,
+ 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47,
+ 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f,
+ 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x00, 0x12,
+ 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12,
+ 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39,
+ 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
+ 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53,
+ 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a,
+ 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
- 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43,
- 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c,
- 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73,
- 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61,
- 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22,
- 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
- 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22,
- 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12,
- 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0a, 0x47, 0x65,
- 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x56,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
- 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74,
- 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a,
- 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
- 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x00, 0x12, 0x4d,
- 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1c,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
- 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a,
- 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65,
- 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64,
- 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50,
- 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a,
- 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47,
- 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52,
- 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66,
- 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x52, 0x65, 0x6c,
- 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47,
+ 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74,
+ 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0a, 0x47,
+ 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65,
+ 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f,
+ 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
+ 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x00, 0x12,
+ 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12,
+ 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
+ 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47,
+ 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e,
+ 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12,
+ 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65,
+ 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65,
+ 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65,
+ 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x52, 0x65,
+ 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x61,
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x11, 0x52,
+ 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x11,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f,
+ 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52,
0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61,
- 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65,
- 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x12, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12,
- 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x12, 0x52, 0x65,
0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x52, 0x75,
- 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1e, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76,
- 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64,
- 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c,
- 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x53,
- 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x10, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20,
- 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x12, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c,
+ 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x52,
+ 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1e, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12,
+ 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e,
+ 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c,
+ 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76,
+ 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x10, 0x53,
+ 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72,
+ 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64,
0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a,
- 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c,
- 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
- 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72,
- 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x5b, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a,
- 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44,
- 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x2e, 0x76, 0x74,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69,
- 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71,
+ 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50,
+ 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x43, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e,
+ 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x44, 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x19,
+ 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73,
+ 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76,
+ 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -6765,7 +7123,7 @@ func file_vtadmin_proto_rawDescGZIP() []byte {
}
var file_vtadmin_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 102)
+var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 107)
var file_vtadmin_proto_goTypes = []interface{}{
(Tablet_ServingState)(0), // 0: vtadmin.Tablet.ServingState
(*Cluster)(nil), // 1: vtadmin.Cluster
@@ -6801,293 +7159,314 @@ var file_vtadmin_proto_goTypes = []interface{}{
(*GetCellsAliasesResponse)(nil), // 31: vtadmin.GetCellsAliasesResponse
(*GetClustersRequest)(nil), // 32: vtadmin.GetClustersRequest
(*GetClustersResponse)(nil), // 33: vtadmin.GetClustersResponse
- (*GetGatesRequest)(nil), // 34: vtadmin.GetGatesRequest
- (*GetGatesResponse)(nil), // 35: vtadmin.GetGatesResponse
- (*GetKeyspaceRequest)(nil), // 36: vtadmin.GetKeyspaceRequest
- (*GetKeyspacesRequest)(nil), // 37: vtadmin.GetKeyspacesRequest
- (*GetKeyspacesResponse)(nil), // 38: vtadmin.GetKeyspacesResponse
- (*GetSchemaRequest)(nil), // 39: vtadmin.GetSchemaRequest
- (*GetSchemasRequest)(nil), // 40: vtadmin.GetSchemasRequest
- (*GetSchemasResponse)(nil), // 41: vtadmin.GetSchemasResponse
- (*GetShardReplicationPositionsRequest)(nil), // 42: vtadmin.GetShardReplicationPositionsRequest
- (*GetShardReplicationPositionsResponse)(nil), // 43: vtadmin.GetShardReplicationPositionsResponse
- (*GetSrvVSchemaRequest)(nil), // 44: vtadmin.GetSrvVSchemaRequest
- (*GetSrvVSchemasRequest)(nil), // 45: vtadmin.GetSrvVSchemasRequest
- (*GetSrvVSchemasResponse)(nil), // 46: vtadmin.GetSrvVSchemasResponse
- (*GetSchemaTableSizeOptions)(nil), // 47: vtadmin.GetSchemaTableSizeOptions
- (*GetTabletRequest)(nil), // 48: vtadmin.GetTabletRequest
- (*GetTabletsRequest)(nil), // 49: vtadmin.GetTabletsRequest
- (*GetTabletsResponse)(nil), // 50: vtadmin.GetTabletsResponse
- (*GetVSchemaRequest)(nil), // 51: vtadmin.GetVSchemaRequest
- (*GetVSchemasRequest)(nil), // 52: vtadmin.GetVSchemasRequest
- (*GetVSchemasResponse)(nil), // 53: vtadmin.GetVSchemasResponse
- (*GetVtctldsRequest)(nil), // 54: vtadmin.GetVtctldsRequest
- (*GetVtctldsResponse)(nil), // 55: vtadmin.GetVtctldsResponse
- (*GetWorkflowRequest)(nil), // 56: vtadmin.GetWorkflowRequest
- (*GetWorkflowsRequest)(nil), // 57: vtadmin.GetWorkflowsRequest
- (*GetWorkflowsResponse)(nil), // 58: vtadmin.GetWorkflowsResponse
- (*PingTabletRequest)(nil), // 59: vtadmin.PingTabletRequest
- (*PingTabletResponse)(nil), // 60: vtadmin.PingTabletResponse
- (*PlannedFailoverShardRequest)(nil), // 61: vtadmin.PlannedFailoverShardRequest
- (*PlannedFailoverShardResponse)(nil), // 62: vtadmin.PlannedFailoverShardResponse
- (*RebuildKeyspaceGraphRequest)(nil), // 63: vtadmin.RebuildKeyspaceGraphRequest
- (*RebuildKeyspaceGraphResponse)(nil), // 64: vtadmin.RebuildKeyspaceGraphResponse
- (*RefreshStateRequest)(nil), // 65: vtadmin.RefreshStateRequest
- (*RefreshStateResponse)(nil), // 66: vtadmin.RefreshStateResponse
- (*ReloadSchemasRequest)(nil), // 67: vtadmin.ReloadSchemasRequest
- (*ReloadSchemasResponse)(nil), // 68: vtadmin.ReloadSchemasResponse
- (*ReloadSchemaShardRequest)(nil), // 69: vtadmin.ReloadSchemaShardRequest
- (*ReloadSchemaShardResponse)(nil), // 70: vtadmin.ReloadSchemaShardResponse
- (*RefreshTabletReplicationSourceRequest)(nil), // 71: vtadmin.RefreshTabletReplicationSourceRequest
- (*RefreshTabletReplicationSourceResponse)(nil), // 72: vtadmin.RefreshTabletReplicationSourceResponse
- (*RemoveKeyspaceCellRequest)(nil), // 73: vtadmin.RemoveKeyspaceCellRequest
- (*RemoveKeyspaceCellResponse)(nil), // 74: vtadmin.RemoveKeyspaceCellResponse
- (*RunHealthCheckRequest)(nil), // 75: vtadmin.RunHealthCheckRequest
- (*RunHealthCheckResponse)(nil), // 76: vtadmin.RunHealthCheckResponse
- (*SetReadOnlyRequest)(nil), // 77: vtadmin.SetReadOnlyRequest
- (*SetReadOnlyResponse)(nil), // 78: vtadmin.SetReadOnlyResponse
- (*SetReadWriteRequest)(nil), // 79: vtadmin.SetReadWriteRequest
- (*SetReadWriteResponse)(nil), // 80: vtadmin.SetReadWriteResponse
- (*StartReplicationRequest)(nil), // 81: vtadmin.StartReplicationRequest
- (*StartReplicationResponse)(nil), // 82: vtadmin.StartReplicationResponse
- (*StopReplicationRequest)(nil), // 83: vtadmin.StopReplicationRequest
- (*StopReplicationResponse)(nil), // 84: vtadmin.StopReplicationResponse
- (*TabletExternallyPromotedRequest)(nil), // 85: vtadmin.TabletExternallyPromotedRequest
- (*TabletExternallyPromotedResponse)(nil), // 86: vtadmin.TabletExternallyPromotedResponse
- (*TabletExternallyReparentedRequest)(nil), // 87: vtadmin.TabletExternallyReparentedRequest
- (*ValidateKeyspaceRequest)(nil), // 88: vtadmin.ValidateKeyspaceRequest
- (*ValidateSchemaKeyspaceRequest)(nil), // 89: vtadmin.ValidateSchemaKeyspaceRequest
- (*ValidateVersionKeyspaceRequest)(nil), // 90: vtadmin.ValidateVersionKeyspaceRequest
- (*VTExplainRequest)(nil), // 91: vtadmin.VTExplainRequest
- (*VTExplainResponse)(nil), // 92: vtadmin.VTExplainResponse
- nil, // 93: vtadmin.ClusterCellsAliases.AliasesEntry
- nil, // 94: vtadmin.Keyspace.ShardsEntry
- nil, // 95: vtadmin.Schema.TableSizesEntry
- (*Schema_ShardTableSize)(nil), // 96: vtadmin.Schema.ShardTableSize
- (*Schema_TableSize)(nil), // 97: vtadmin.Schema.TableSize
- nil, // 98: vtadmin.Schema.TableSize.ByShardEntry
- nil, // 99: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
- (*ReloadSchemasResponse_KeyspaceResult)(nil), // 100: vtadmin.ReloadSchemasResponse.KeyspaceResult
- (*ReloadSchemasResponse_ShardResult)(nil), // 101: vtadmin.ReloadSchemasResponse.ShardResult
- (*ReloadSchemasResponse_TabletResult)(nil), // 102: vtadmin.ReloadSchemasResponse.TabletResult
- (*mysqlctl.BackupInfo)(nil), // 103: mysqlctl.BackupInfo
- (*topodata.CellInfo)(nil), // 104: topodata.CellInfo
- (*vtctldata.ShardReplicationPositionsResponse)(nil), // 105: vtctldata.ShardReplicationPositionsResponse
- (*vtctldata.Keyspace)(nil), // 106: vtctldata.Keyspace
- (*tabletmanagerdata.TableDefinition)(nil), // 107: tabletmanagerdata.TableDefinition
- (*vtctldata.Shard)(nil), // 108: vtctldata.Shard
- (*vschema.SrvVSchema)(nil), // 109: vschema.SrvVSchema
- (*topodata.Tablet)(nil), // 110: topodata.Tablet
- (*vschema.Keyspace)(nil), // 111: vschema.Keyspace
- (*vtctldata.Workflow)(nil), // 112: vtctldata.Workflow
- (*vtctldata.CreateKeyspaceRequest)(nil), // 113: vtctldata.CreateKeyspaceRequest
- (*vtctldata.CreateShardRequest)(nil), // 114: vtctldata.CreateShardRequest
- (*vtctldata.DeleteKeyspaceRequest)(nil), // 115: vtctldata.DeleteKeyspaceRequest
- (*vtctldata.DeleteShardsRequest)(nil), // 116: vtctldata.DeleteShardsRequest
- (*topodata.TabletAlias)(nil), // 117: topodata.TabletAlias
- (*vtctldata.EmergencyReparentShardRequest)(nil), // 118: vtctldata.EmergencyReparentShardRequest
- (*logutil.Event)(nil), // 119: logutil.Event
- (*vtctldata.GetBackupsRequest)(nil), // 120: vtctldata.GetBackupsRequest
- (*vtctldata.PlannedReparentShardRequest)(nil), // 121: vtctldata.PlannedReparentShardRequest
- (*topodata.CellsAlias)(nil), // 122: topodata.CellsAlias
- (*vtctldata.CreateShardResponse)(nil), // 123: vtctldata.CreateShardResponse
- (*vtctldata.DeleteKeyspaceResponse)(nil), // 124: vtctldata.DeleteKeyspaceResponse
- (*vtctldata.DeleteShardsResponse)(nil), // 125: vtctldata.DeleteShardsResponse
- (*vtctldata.ValidateKeyspaceResponse)(nil), // 126: vtctldata.ValidateKeyspaceResponse
- (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 127: vtctldata.ValidateSchemaKeyspaceResponse
- (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 128: vtctldata.ValidateVersionKeyspaceResponse
+ (*GetFullStatusRequest)(nil), // 34: vtadmin.GetFullStatusRequest
+ (*GetGatesRequest)(nil), // 35: vtadmin.GetGatesRequest
+ (*GetGatesResponse)(nil), // 36: vtadmin.GetGatesResponse
+ (*GetKeyspaceRequest)(nil), // 37: vtadmin.GetKeyspaceRequest
+ (*GetKeyspacesRequest)(nil), // 38: vtadmin.GetKeyspacesRequest
+ (*GetKeyspacesResponse)(nil), // 39: vtadmin.GetKeyspacesResponse
+ (*GetSchemaRequest)(nil), // 40: vtadmin.GetSchemaRequest
+ (*GetSchemasRequest)(nil), // 41: vtadmin.GetSchemasRequest
+ (*GetSchemasResponse)(nil), // 42: vtadmin.GetSchemasResponse
+ (*GetShardReplicationPositionsRequest)(nil), // 43: vtadmin.GetShardReplicationPositionsRequest
+ (*GetShardReplicationPositionsResponse)(nil), // 44: vtadmin.GetShardReplicationPositionsResponse
+ (*GetSrvVSchemaRequest)(nil), // 45: vtadmin.GetSrvVSchemaRequest
+ (*GetSrvVSchemasRequest)(nil), // 46: vtadmin.GetSrvVSchemasRequest
+ (*GetSrvVSchemasResponse)(nil), // 47: vtadmin.GetSrvVSchemasResponse
+ (*GetSchemaTableSizeOptions)(nil), // 48: vtadmin.GetSchemaTableSizeOptions
+ (*GetTabletRequest)(nil), // 49: vtadmin.GetTabletRequest
+ (*GetTabletsRequest)(nil), // 50: vtadmin.GetTabletsRequest
+ (*GetTabletsResponse)(nil), // 51: vtadmin.GetTabletsResponse
+ (*GetTopologyPathRequest)(nil), // 52: vtadmin.GetTopologyPathRequest
+ (*GetVSchemaRequest)(nil), // 53: vtadmin.GetVSchemaRequest
+ (*GetVSchemasRequest)(nil), // 54: vtadmin.GetVSchemasRequest
+ (*GetVSchemasResponse)(nil), // 55: vtadmin.GetVSchemasResponse
+ (*GetVtctldsRequest)(nil), // 56: vtadmin.GetVtctldsRequest
+ (*GetVtctldsResponse)(nil), // 57: vtadmin.GetVtctldsResponse
+ (*GetWorkflowRequest)(nil), // 58: vtadmin.GetWorkflowRequest
+ (*GetWorkflowsRequest)(nil), // 59: vtadmin.GetWorkflowsRequest
+ (*GetWorkflowsResponse)(nil), // 60: vtadmin.GetWorkflowsResponse
+ (*PingTabletRequest)(nil), // 61: vtadmin.PingTabletRequest
+ (*PingTabletResponse)(nil), // 62: vtadmin.PingTabletResponse
+ (*PlannedFailoverShardRequest)(nil), // 63: vtadmin.PlannedFailoverShardRequest
+ (*PlannedFailoverShardResponse)(nil), // 64: vtadmin.PlannedFailoverShardResponse
+ (*RebuildKeyspaceGraphRequest)(nil), // 65: vtadmin.RebuildKeyspaceGraphRequest
+ (*RebuildKeyspaceGraphResponse)(nil), // 66: vtadmin.RebuildKeyspaceGraphResponse
+ (*RefreshStateRequest)(nil), // 67: vtadmin.RefreshStateRequest
+ (*RefreshStateResponse)(nil), // 68: vtadmin.RefreshStateResponse
+ (*ReloadSchemasRequest)(nil), // 69: vtadmin.ReloadSchemasRequest
+ (*ReloadSchemasResponse)(nil), // 70: vtadmin.ReloadSchemasResponse
+ (*ReloadSchemaShardRequest)(nil), // 71: vtadmin.ReloadSchemaShardRequest
+ (*ReloadSchemaShardResponse)(nil), // 72: vtadmin.ReloadSchemaShardResponse
+ (*RefreshTabletReplicationSourceRequest)(nil), // 73: vtadmin.RefreshTabletReplicationSourceRequest
+ (*RefreshTabletReplicationSourceResponse)(nil), // 74: vtadmin.RefreshTabletReplicationSourceResponse
+ (*RemoveKeyspaceCellRequest)(nil), // 75: vtadmin.RemoveKeyspaceCellRequest
+ (*RemoveKeyspaceCellResponse)(nil), // 76: vtadmin.RemoveKeyspaceCellResponse
+ (*RunHealthCheckRequest)(nil), // 77: vtadmin.RunHealthCheckRequest
+ (*RunHealthCheckResponse)(nil), // 78: vtadmin.RunHealthCheckResponse
+ (*SetReadOnlyRequest)(nil), // 79: vtadmin.SetReadOnlyRequest
+ (*SetReadOnlyResponse)(nil), // 80: vtadmin.SetReadOnlyResponse
+ (*SetReadWriteRequest)(nil), // 81: vtadmin.SetReadWriteRequest
+ (*SetReadWriteResponse)(nil), // 82: vtadmin.SetReadWriteResponse
+ (*StartReplicationRequest)(nil), // 83: vtadmin.StartReplicationRequest
+ (*StartReplicationResponse)(nil), // 84: vtadmin.StartReplicationResponse
+ (*StopReplicationRequest)(nil), // 85: vtadmin.StopReplicationRequest
+ (*StopReplicationResponse)(nil), // 86: vtadmin.StopReplicationResponse
+ (*TabletExternallyPromotedRequest)(nil), // 87: vtadmin.TabletExternallyPromotedRequest
+ (*TabletExternallyPromotedResponse)(nil), // 88: vtadmin.TabletExternallyPromotedResponse
+ (*TabletExternallyReparentedRequest)(nil), // 89: vtadmin.TabletExternallyReparentedRequest
+ (*ValidateRequest)(nil), // 90: vtadmin.ValidateRequest
+ (*ValidateKeyspaceRequest)(nil), // 91: vtadmin.ValidateKeyspaceRequest
+ (*ValidateSchemaKeyspaceRequest)(nil), // 92: vtadmin.ValidateSchemaKeyspaceRequest
+ (*ValidateShardRequest)(nil), // 93: vtadmin.ValidateShardRequest
+ (*ValidateVersionKeyspaceRequest)(nil), // 94: vtadmin.ValidateVersionKeyspaceRequest
+ (*ValidateVersionShardRequest)(nil), // 95: vtadmin.ValidateVersionShardRequest
+ (*VTExplainRequest)(nil), // 96: vtadmin.VTExplainRequest
+ (*VTExplainResponse)(nil), // 97: vtadmin.VTExplainResponse
+ nil, // 98: vtadmin.ClusterCellsAliases.AliasesEntry
+ nil, // 99: vtadmin.Keyspace.ShardsEntry
+ nil, // 100: vtadmin.Schema.TableSizesEntry
+ (*Schema_ShardTableSize)(nil), // 101: vtadmin.Schema.ShardTableSize
+ (*Schema_TableSize)(nil), // 102: vtadmin.Schema.TableSize
+ nil, // 103: vtadmin.Schema.TableSize.ByShardEntry
+ nil, // 104: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
+ (*ReloadSchemasResponse_KeyspaceResult)(nil), // 105: vtadmin.ReloadSchemasResponse.KeyspaceResult
+ (*ReloadSchemasResponse_ShardResult)(nil), // 106: vtadmin.ReloadSchemasResponse.ShardResult
+ (*ReloadSchemasResponse_TabletResult)(nil), // 107: vtadmin.ReloadSchemasResponse.TabletResult
+ (*mysqlctl.BackupInfo)(nil), // 108: mysqlctl.BackupInfo
+ (*topodata.CellInfo)(nil), // 109: topodata.CellInfo
+ (*vtctldata.ShardReplicationPositionsResponse)(nil), // 110: vtctldata.ShardReplicationPositionsResponse
+ (*vtctldata.Keyspace)(nil), // 111: vtctldata.Keyspace
+ (*tabletmanagerdata.TableDefinition)(nil), // 112: tabletmanagerdata.TableDefinition
+ (*vtctldata.Shard)(nil), // 113: vtctldata.Shard
+ (*vschema.SrvVSchema)(nil), // 114: vschema.SrvVSchema
+ (*topodata.Tablet)(nil), // 115: topodata.Tablet
+ (*vschema.Keyspace)(nil), // 116: vschema.Keyspace
+ (*vtctldata.Workflow)(nil), // 117: vtctldata.Workflow
+ (*vtctldata.CreateKeyspaceRequest)(nil), // 118: vtctldata.CreateKeyspaceRequest
+ (*vtctldata.CreateShardRequest)(nil), // 119: vtctldata.CreateShardRequest
+ (*vtctldata.DeleteKeyspaceRequest)(nil), // 120: vtctldata.DeleteKeyspaceRequest
+ (*vtctldata.DeleteShardsRequest)(nil), // 121: vtctldata.DeleteShardsRequest
+ (*topodata.TabletAlias)(nil), // 122: topodata.TabletAlias
+ (*vtctldata.EmergencyReparentShardRequest)(nil), // 123: vtctldata.EmergencyReparentShardRequest
+ (*logutil.Event)(nil), // 124: logutil.Event
+ (*vtctldata.GetBackupsRequest)(nil), // 125: vtctldata.GetBackupsRequest
+ (*vtctldata.PlannedReparentShardRequest)(nil), // 126: vtctldata.PlannedReparentShardRequest
+ (*topodata.CellsAlias)(nil), // 127: topodata.CellsAlias
+ (*vtctldata.CreateShardResponse)(nil), // 128: vtctldata.CreateShardResponse
+ (*vtctldata.DeleteKeyspaceResponse)(nil), // 129: vtctldata.DeleteKeyspaceResponse
+ (*vtctldata.DeleteShardsResponse)(nil), // 130: vtctldata.DeleteShardsResponse
+ (*vtctldata.GetFullStatusResponse)(nil), // 131: vtctldata.GetFullStatusResponse
+ (*vtctldata.GetTopologyPathResponse)(nil), // 132: vtctldata.GetTopologyPathResponse
+ (*vtctldata.ValidateResponse)(nil), // 133: vtctldata.ValidateResponse
+ (*vtctldata.ValidateKeyspaceResponse)(nil), // 134: vtctldata.ValidateKeyspaceResponse
+ (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 135: vtctldata.ValidateSchemaKeyspaceResponse
+ (*vtctldata.ValidateShardResponse)(nil), // 136: vtctldata.ValidateShardResponse
+ (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 137: vtctldata.ValidateVersionKeyspaceResponse
+ (*vtctldata.ValidateVersionShardResponse)(nil), // 138: vtctldata.ValidateVersionShardResponse
}
var file_vtadmin_proto_depIdxs = []int32{
1, // 0: vtadmin.ClusterBackup.cluster:type_name -> vtadmin.Cluster
- 103, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo
+ 108, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo
1, // 2: vtadmin.ClusterCellsAliases.cluster:type_name -> vtadmin.Cluster
- 93, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry
+ 98, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry
1, // 4: vtadmin.ClusterCellInfo.cluster:type_name -> vtadmin.Cluster
- 104, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo
+ 109, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo
1, // 6: vtadmin.ClusterShardReplicationPosition.cluster:type_name -> vtadmin.Cluster
- 105, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse
+ 110, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse
15, // 8: vtadmin.ClusterWorkflows.workflows:type_name -> vtadmin.Workflow
1, // 9: vtadmin.Keyspace.cluster:type_name -> vtadmin.Cluster
- 106, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace
- 94, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry
+ 111, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace
+ 99, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry
1, // 12: vtadmin.Schema.cluster:type_name -> vtadmin.Cluster
- 107, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition
- 95, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry
+ 112, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition
+ 100, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry
1, // 15: vtadmin.Shard.cluster:type_name -> vtadmin.Cluster
- 108, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard
+ 113, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard
1, // 17: vtadmin.SrvVSchema.cluster:type_name -> vtadmin.Cluster
- 109, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema
+ 114, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema
1, // 19: vtadmin.Tablet.cluster:type_name -> vtadmin.Cluster
- 110, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet
+ 115, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet
0, // 21: vtadmin.Tablet.state:type_name -> vtadmin.Tablet.ServingState
1, // 22: vtadmin.VSchema.cluster:type_name -> vtadmin.Cluster
- 111, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace
+ 116, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace
1, // 24: vtadmin.Vtctld.cluster:type_name -> vtadmin.Cluster
1, // 25: vtadmin.VTGate.cluster:type_name -> vtadmin.Cluster
1, // 26: vtadmin.Workflow.cluster:type_name -> vtadmin.Cluster
- 112, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow
- 113, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest
+ 117, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow
+ 118, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest
7, // 29: vtadmin.CreateKeyspaceResponse.keyspace:type_name -> vtadmin.Keyspace
- 114, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest
- 115, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest
- 116, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest
- 117, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias
+ 119, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest
+ 120, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest
+ 121, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest
+ 122, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias
1, // 34: vtadmin.DeleteTabletResponse.cluster:type_name -> vtadmin.Cluster
- 118, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest
+ 123, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest
1, // 36: vtadmin.EmergencyFailoverShardResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 119, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event
- 47, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
- 120, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest
+ 122, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 124, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event
+ 48, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
+ 125, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest
2, // 41: vtadmin.GetBackupsResponse.backups:type_name -> vtadmin.ClusterBackup
4, // 42: vtadmin.GetCellInfosResponse.cell_infos:type_name -> vtadmin.ClusterCellInfo
3, // 43: vtadmin.GetCellsAliasesResponse.aliases:type_name -> vtadmin.ClusterCellsAliases
1, // 44: vtadmin.GetClustersResponse.clusters:type_name -> vtadmin.Cluster
- 14, // 45: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate
- 7, // 46: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace
- 47, // 47: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
- 47, // 48: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
- 8, // 49: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema
- 5, // 50: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition
- 10, // 51: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema
- 117, // 52: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias
- 11, // 53: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet
- 12, // 54: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema
- 13, // 55: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld
- 99, // 56: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
- 117, // 57: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias
- 1, // 58: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster
- 121, // 59: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest
- 1, // 60: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 61: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 119, // 62: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event
- 117, // 63: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias
- 1, // 64: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 65: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias
- 100, // 66: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult
- 101, // 67: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult
- 102, // 68: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult
- 119, // 69: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event
- 117, // 70: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias
- 117, // 71: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias
- 1, // 72: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 73: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias
- 1, // 74: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 75: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias
- 117, // 76: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias
- 117, // 77: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias
- 1, // 78: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 79: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias
- 1, // 80: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 81: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias
- 1, // 82: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster
- 117, // 83: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias
- 117, // 84: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias
- 117, // 85: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias
- 122, // 86: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias
- 108, // 87: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard
- 97, // 88: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize
- 98, // 89: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry
- 96, // 90: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize
- 6, // 91: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows
- 7, // 92: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace
- 119, // 93: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event
- 9, // 94: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard
- 119, // 95: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event
- 11, // 96: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet
- 16, // 97: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest
- 18, // 98: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest
- 19, // 99: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest
- 20, // 100: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest
- 21, // 101: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest
- 23, // 102: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest
- 25, // 103: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest
- 26, // 104: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest
- 28, // 105: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest
- 30, // 106: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest
- 32, // 107: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest
- 34, // 108: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest
- 36, // 109: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest
- 37, // 110: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest
- 39, // 111: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest
- 40, // 112: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest
- 42, // 113: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest
- 44, // 114: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest
- 45, // 115: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest
- 48, // 116: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest
- 49, // 117: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest
- 51, // 118: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest
- 52, // 119: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest
- 54, // 120: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest
- 56, // 121: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest
- 57, // 122: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest
- 59, // 123: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest
- 61, // 124: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest
- 63, // 125: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest
- 65, // 126: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest
- 71, // 127: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest
- 67, // 128: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest
- 69, // 129: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest
- 73, // 130: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest
- 75, // 131: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest
- 77, // 132: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest
- 79, // 133: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest
- 81, // 134: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest
- 83, // 135: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest
- 85, // 136: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest
- 88, // 137: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest
- 89, // 138: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest
- 90, // 139: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest
- 91, // 140: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest
- 17, // 141: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse
- 123, // 142: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse
- 124, // 143: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
- 125, // 144: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
- 22, // 145: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse
- 24, // 146: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse
- 8, // 147: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema
- 27, // 148: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse
- 29, // 149: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse
- 31, // 150: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse
- 33, // 151: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse
- 35, // 152: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse
- 7, // 153: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace
- 38, // 154: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse
- 8, // 155: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema
- 41, // 156: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse
- 43, // 157: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse
- 10, // 158: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema
- 46, // 159: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse
- 11, // 160: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet
- 50, // 161: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse
- 12, // 162: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema
- 53, // 163: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse
- 55, // 164: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse
- 15, // 165: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow
- 58, // 166: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse
- 60, // 167: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse
- 62, // 168: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse
- 64, // 169: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse
- 66, // 170: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse
- 72, // 171: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse
- 68, // 172: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse
- 70, // 173: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse
- 74, // 174: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse
- 76, // 175: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse
- 78, // 176: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse
- 80, // 177: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse
- 82, // 178: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse
- 84, // 179: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse
- 86, // 180: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse
- 126, // 181: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
- 127, // 182: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
- 128, // 183: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
- 92, // 184: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse
- 141, // [141:185] is the sub-list for method output_type
- 97, // [97:141] is the sub-list for method input_type
- 97, // [97:97] is the sub-list for extension type_name
- 97, // [97:97] is the sub-list for extension extendee
- 0, // [0:97] is the sub-list for field type_name
+ 122, // 45: vtadmin.GetFullStatusRequest.alias:type_name -> topodata.TabletAlias
+ 14, // 46: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate
+ 7, // 47: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace
+ 48, // 48: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
+ 48, // 49: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions
+ 8, // 50: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema
+ 5, // 51: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition
+ 10, // 52: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema
+ 122, // 53: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias
+ 11, // 54: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet
+ 12, // 55: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema
+ 13, // 56: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld
+ 104, // 57: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry
+ 122, // 58: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 59: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster
+ 126, // 60: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest
+ 1, // 61: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 62: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 124, // 63: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event
+ 122, // 64: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 65: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 66: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias
+ 105, // 67: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult
+ 106, // 68: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult
+ 107, // 69: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult
+ 124, // 70: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event
+ 122, // 71: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias
+ 122, // 72: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias
+ 1, // 73: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 74: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 75: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 76: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias
+ 122, // 77: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias
+ 122, // 78: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 79: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 80: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 81: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 82: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias
+ 1, // 83: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster
+ 122, // 84: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias
+ 122, // 85: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias
+ 122, // 86: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias
+ 127, // 87: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias
+ 113, // 88: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard
+ 102, // 89: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize
+ 103, // 90: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry
+ 101, // 91: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize
+ 6, // 92: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows
+ 7, // 93: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace
+ 124, // 94: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event
+ 9, // 95: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard
+ 124, // 96: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event
+ 11, // 97: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet
+ 16, // 98: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest
+ 18, // 99: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest
+ 19, // 100: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest
+ 20, // 101: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest
+ 21, // 102: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest
+ 23, // 103: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest
+ 25, // 104: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest
+ 26, // 105: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest
+ 28, // 106: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest
+ 30, // 107: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest
+ 32, // 108: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest
+ 34, // 109: vtadmin.VTAdmin.GetFullStatus:input_type -> vtadmin.GetFullStatusRequest
+ 35, // 110: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest
+ 37, // 111: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest
+ 38, // 112: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest
+ 40, // 113: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest
+ 41, // 114: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest
+ 43, // 115: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest
+ 45, // 116: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest
+ 46, // 117: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest
+ 49, // 118: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest
+ 50, // 119: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest
+ 52, // 120: vtadmin.VTAdmin.GetTopologyPath:input_type -> vtadmin.GetTopologyPathRequest
+ 53, // 121: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest
+ 54, // 122: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest
+ 56, // 123: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest
+ 58, // 124: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest
+ 59, // 125: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest
+ 61, // 126: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest
+ 63, // 127: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest
+ 65, // 128: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest
+ 67, // 129: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest
+ 73, // 130: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest
+ 69, // 131: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest
+ 71, // 132: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest
+ 75, // 133: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest
+ 77, // 134: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest
+ 79, // 135: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest
+ 81, // 136: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest
+ 83, // 137: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest
+ 85, // 138: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest
+ 87, // 139: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest
+ 90, // 140: vtadmin.VTAdmin.Validate:input_type -> vtadmin.ValidateRequest
+ 91, // 141: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest
+ 92, // 142: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest
+ 93, // 143: vtadmin.VTAdmin.ValidateShard:input_type -> vtadmin.ValidateShardRequest
+ 94, // 144: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest
+ 95, // 145: vtadmin.VTAdmin.ValidateVersionShard:input_type -> vtadmin.ValidateVersionShardRequest
+ 96, // 146: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest
+ 17, // 147: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse
+ 128, // 148: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse
+ 129, // 149: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
+ 130, // 150: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
+ 22, // 151: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse
+ 24, // 152: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse
+ 8, // 153: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema
+ 27, // 154: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse
+ 29, // 155: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse
+ 31, // 156: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse
+ 33, // 157: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse
+ 131, // 158: vtadmin.VTAdmin.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse
+ 36, // 159: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse
+ 7, // 160: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace
+ 39, // 161: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse
+ 8, // 162: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema
+ 42, // 163: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse
+ 44, // 164: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse
+ 10, // 165: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema
+ 47, // 166: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse
+ 11, // 167: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet
+ 51, // 168: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse
+ 132, // 169: vtadmin.VTAdmin.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse
+ 12, // 170: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema
+ 55, // 171: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse
+ 57, // 172: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse
+ 15, // 173: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow
+ 60, // 174: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse
+ 62, // 175: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse
+ 64, // 176: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse
+ 66, // 177: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse
+ 68, // 178: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse
+ 74, // 179: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse
+ 70, // 180: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse
+ 72, // 181: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse
+ 76, // 182: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse
+ 78, // 183: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse
+ 80, // 184: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse
+ 82, // 185: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse
+ 84, // 186: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse
+ 86, // 187: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse
+ 88, // 188: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse
+ 133, // 189: vtadmin.VTAdmin.Validate:output_type -> vtctldata.ValidateResponse
+ 134, // 190: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
+ 135, // 191: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
+ 136, // 192: vtadmin.VTAdmin.ValidateShard:output_type -> vtctldata.ValidateShardResponse
+ 137, // 193: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
+ 138, // 194: vtadmin.VTAdmin.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse
+ 97, // 195: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse
+ 147, // [147:196] is the sub-list for method output_type
+ 98, // [98:147] is the sub-list for method input_type
+ 98, // [98:98] is the sub-list for extension type_name
+ 98, // [98:98] is the sub-list for extension extendee
+ 0, // [0:98] is the sub-list for field type_name
}
func init() { file_vtadmin_proto_init() }
@@ -7493,7 +7872,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetGatesRequest); i {
+ switch v := v.(*GetFullStatusRequest); i {
case 0:
return &v.state
case 1:
@@ -7505,7 +7884,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetGatesResponse); i {
+ switch v := v.(*GetGatesRequest); i {
case 0:
return &v.state
case 1:
@@ -7517,7 +7896,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyspaceRequest); i {
+ switch v := v.(*GetGatesResponse); i {
case 0:
return &v.state
case 1:
@@ -7529,7 +7908,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyspacesRequest); i {
+ switch v := v.(*GetKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -7541,7 +7920,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyspacesResponse); i {
+ switch v := v.(*GetKeyspacesRequest); i {
case 0:
return &v.state
case 1:
@@ -7553,7 +7932,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemaRequest); i {
+ switch v := v.(*GetKeyspacesResponse); i {
case 0:
return &v.state
case 1:
@@ -7565,7 +7944,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemasRequest); i {
+ switch v := v.(*GetSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -7577,7 +7956,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemasResponse); i {
+ switch v := v.(*GetSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7589,7 +7968,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardReplicationPositionsRequest); i {
+ switch v := v.(*GetSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7601,7 +7980,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetShardReplicationPositionsResponse); i {
+ switch v := v.(*GetShardReplicationPositionsRequest); i {
case 0:
return &v.state
case 1:
@@ -7613,7 +7992,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemaRequest); i {
+ switch v := v.(*GetShardReplicationPositionsResponse); i {
case 0:
return &v.state
case 1:
@@ -7625,7 +8004,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemasRequest); i {
+ switch v := v.(*GetSrvVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -7637,7 +8016,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSrvVSchemasResponse); i {
+ switch v := v.(*GetSrvVSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7649,7 +8028,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetSchemaTableSizeOptions); i {
+ switch v := v.(*GetSrvVSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7661,7 +8040,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletRequest); i {
+ switch v := v.(*GetSchemaTableSizeOptions); i {
case 0:
return &v.state
case 1:
@@ -7673,7 +8052,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletsRequest); i {
+ switch v := v.(*GetTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -7685,7 +8064,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTabletsResponse); i {
+ switch v := v.(*GetTabletsRequest); i {
case 0:
return &v.state
case 1:
@@ -7697,7 +8076,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemaRequest); i {
+ switch v := v.(*GetTabletsResponse); i {
case 0:
return &v.state
case 1:
@@ -7709,7 +8088,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemasRequest); i {
+ switch v := v.(*GetTopologyPathRequest); i {
case 0:
return &v.state
case 1:
@@ -7721,7 +8100,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemasResponse); i {
+ switch v := v.(*GetVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -7733,7 +8112,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVtctldsRequest); i {
+ switch v := v.(*GetVSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7745,7 +8124,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVtctldsResponse); i {
+ switch v := v.(*GetVSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7757,7 +8136,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowRequest); i {
+ switch v := v.(*GetVtctldsRequest); i {
case 0:
return &v.state
case 1:
@@ -7769,7 +8148,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowsRequest); i {
+ switch v := v.(*GetVtctldsResponse); i {
case 0:
return &v.state
case 1:
@@ -7781,7 +8160,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowsResponse); i {
+ switch v := v.(*GetWorkflowRequest); i {
case 0:
return &v.state
case 1:
@@ -7793,7 +8172,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PingTabletRequest); i {
+ switch v := v.(*GetWorkflowsRequest); i {
case 0:
return &v.state
case 1:
@@ -7805,7 +8184,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PingTabletResponse); i {
+ switch v := v.(*GetWorkflowsResponse); i {
case 0:
return &v.state
case 1:
@@ -7817,7 +8196,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlannedFailoverShardRequest); i {
+ switch v := v.(*PingTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -7829,7 +8208,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlannedFailoverShardResponse); i {
+ switch v := v.(*PingTabletResponse); i {
case 0:
return &v.state
case 1:
@@ -7841,7 +8220,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildKeyspaceGraphRequest); i {
+ switch v := v.(*PlannedFailoverShardRequest); i {
case 0:
return &v.state
case 1:
@@ -7853,7 +8232,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildKeyspaceGraphResponse); i {
+ switch v := v.(*PlannedFailoverShardResponse); i {
case 0:
return &v.state
case 1:
@@ -7865,7 +8244,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateRequest); i {
+ switch v := v.(*RebuildKeyspaceGraphRequest); i {
case 0:
return &v.state
case 1:
@@ -7877,7 +8256,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateResponse); i {
+ switch v := v.(*RebuildKeyspaceGraphResponse); i {
case 0:
return &v.state
case 1:
@@ -7889,7 +8268,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemasRequest); i {
+ switch v := v.(*RefreshStateRequest); i {
case 0:
return &v.state
case 1:
@@ -7901,7 +8280,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemasResponse); i {
+ switch v := v.(*RefreshStateResponse); i {
case 0:
return &v.state
case 1:
@@ -7913,7 +8292,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaShardRequest); i {
+ switch v := v.(*ReloadSchemasRequest); i {
case 0:
return &v.state
case 1:
@@ -7925,7 +8304,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaShardResponse); i {
+ switch v := v.(*ReloadSchemasResponse); i {
case 0:
return &v.state
case 1:
@@ -7937,7 +8316,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshTabletReplicationSourceRequest); i {
+ switch v := v.(*ReloadSchemaShardRequest); i {
case 0:
return &v.state
case 1:
@@ -7949,7 +8328,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshTabletReplicationSourceResponse); i {
+ switch v := v.(*ReloadSchemaShardResponse); i {
case 0:
return &v.state
case 1:
@@ -7961,7 +8340,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveKeyspaceCellRequest); i {
+ switch v := v.(*RefreshTabletReplicationSourceRequest); i {
case 0:
return &v.state
case 1:
@@ -7973,7 +8352,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveKeyspaceCellResponse); i {
+ switch v := v.(*RefreshTabletReplicationSourceResponse); i {
case 0:
return &v.state
case 1:
@@ -7985,7 +8364,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RunHealthCheckRequest); i {
+ switch v := v.(*RemoveKeyspaceCellRequest); i {
case 0:
return &v.state
case 1:
@@ -7997,7 +8376,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RunHealthCheckResponse); i {
+ switch v := v.(*RemoveKeyspaceCellResponse); i {
case 0:
return &v.state
case 1:
@@ -8009,7 +8388,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadOnlyRequest); i {
+ switch v := v.(*RunHealthCheckRequest); i {
case 0:
return &v.state
case 1:
@@ -8021,7 +8400,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadOnlyResponse); i {
+ switch v := v.(*RunHealthCheckResponse); i {
case 0:
return &v.state
case 1:
@@ -8033,7 +8412,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadWriteRequest); i {
+ switch v := v.(*SetReadOnlyRequest); i {
case 0:
return &v.state
case 1:
@@ -8045,7 +8424,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetReadWriteResponse); i {
+ switch v := v.(*SetReadOnlyResponse); i {
case 0:
return &v.state
case 1:
@@ -8057,7 +8436,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartReplicationRequest); i {
+ switch v := v.(*SetReadWriteRequest); i {
case 0:
return &v.state
case 1:
@@ -8069,7 +8448,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartReplicationResponse); i {
+ switch v := v.(*SetReadWriteResponse); i {
case 0:
return &v.state
case 1:
@@ -8081,7 +8460,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopReplicationRequest); i {
+ switch v := v.(*StartReplicationRequest); i {
case 0:
return &v.state
case 1:
@@ -8093,7 +8472,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopReplicationResponse); i {
+ switch v := v.(*StartReplicationResponse); i {
case 0:
return &v.state
case 1:
@@ -8105,7 +8484,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyPromotedRequest); i {
+ switch v := v.(*StopReplicationRequest); i {
case 0:
return &v.state
case 1:
@@ -8117,7 +8496,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyPromotedResponse); i {
+ switch v := v.(*StopReplicationResponse); i {
case 0:
return &v.state
case 1:
@@ -8129,7 +8508,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyReparentedRequest); i {
+ switch v := v.(*TabletExternallyPromotedRequest); i {
case 0:
return &v.state
case 1:
@@ -8141,7 +8520,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateKeyspaceRequest); i {
+ switch v := v.(*TabletExternallyPromotedResponse); i {
case 0:
return &v.state
case 1:
@@ -8153,7 +8532,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateSchemaKeyspaceRequest); i {
+ switch v := v.(*TabletExternallyReparentedRequest); i {
case 0:
return &v.state
case 1:
@@ -8165,7 +8544,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateVersionKeyspaceRequest); i {
+ switch v := v.(*ValidateRequest); i {
case 0:
return &v.state
case 1:
@@ -8177,7 +8556,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VTExplainRequest); i {
+ switch v := v.(*ValidateKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -8189,7 +8568,43 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VTExplainResponse); i {
+ switch v := v.(*ValidateSchemaKeyspaceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateShardRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionKeyspaceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionShardRequest); i {
case 0:
return &v.state
case 1:
@@ -8201,7 +8616,7 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema_ShardTableSize); i {
+ switch v := v.(*VTExplainRequest); i {
case 0:
return &v.state
case 1:
@@ -8213,6 +8628,30 @@ func file_vtadmin_proto_init() {
}
}
file_vtadmin_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VTExplainResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Schema_ShardTableSize); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtadmin_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Schema_TableSize); i {
case 0:
return &v.state
@@ -8224,7 +8663,7 @@ func file_vtadmin_proto_init() {
return nil
}
}
- file_vtadmin_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} {
+ file_vtadmin_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemasResponse_KeyspaceResult); i {
case 0:
return &v.state
@@ -8236,7 +8675,7 @@ func file_vtadmin_proto_init() {
return nil
}
}
- file_vtadmin_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
+ file_vtadmin_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemasResponse_ShardResult); i {
case 0:
return &v.state
@@ -8248,7 +8687,7 @@ func file_vtadmin_proto_init() {
return nil
}
}
- file_vtadmin_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
+ file_vtadmin_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReloadSchemasResponse_TabletResult); i {
case 0:
return &v.state
@@ -8267,7 +8706,7 @@ func file_vtadmin_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vtadmin_proto_rawDesc,
NumEnums: 1,
- NumMessages: 102,
+ NumMessages: 107,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go
index 3dcdc93216b..fd6cda64704 100644
--- a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go
+++ b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go
@@ -54,6 +54,8 @@ type VTAdminClient interface {
GetCellsAliases(ctx context.Context, in *GetCellsAliasesRequest, opts ...grpc.CallOption) (*GetCellsAliasesResponse, error)
// GetClusters returns all configured clusters.
GetClusters(ctx context.Context, in *GetClustersRequest, opts ...grpc.CallOption) (*GetClustersResponse, error)
+ // GetFullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
+ GetFullStatus(ctx context.Context, in *GetFullStatusRequest, opts ...grpc.CallOption) (*vtctldata.GetFullStatusResponse, error)
// GetGates returns all gates across all the specified clusters.
GetGates(ctx context.Context, in *GetGatesRequest, opts ...grpc.CallOption) (*GetGatesResponse, error)
// GetKeyspace returns a keyspace by name in the specified cluster.
@@ -78,6 +80,8 @@ type VTAdminClient interface {
GetTablet(ctx context.Context, in *GetTabletRequest, opts ...grpc.CallOption) (*Tablet, error)
// GetTablets returns all tablets across all the specified clusters.
GetTablets(ctx context.Context, in *GetTabletsRequest, opts ...grpc.CallOption) (*GetTabletsResponse, error)
+ // GetTopologyPath returns the cell located at the specified path in the topology server.
+ GetTopologyPath(ctx context.Context, in *GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error)
// GetVSchema returns a VSchema for the specified keyspace in the specified
// cluster.
GetVSchema(ctx context.Context, in *GetVSchemaRequest, opts ...grpc.CallOption) (*VSchema, error)
@@ -138,6 +142,9 @@ type VTAdminClient interface {
// * "orchestrator" here refers to external orchestrator, not the newer,
// Vitess-aware orchestrator, VTOrc.
TabletExternallyPromoted(ctx context.Context, in *TabletExternallyPromotedRequest, opts ...grpc.CallOption) (*TabletExternallyPromotedResponse, error)
+ // Validate validates all nodes in a cluster that are reachable from the global replication graph,
+ // as well as all tablets in discoverable cells, are consistent
+ Validate(ctx context.Context, in *ValidateRequest, opts ...grpc.CallOption) (*vtctldata.ValidateResponse, error)
// ValidateKeyspace validates that all nodes reachable from the specified
// keyspace are consistent.
ValidateKeyspace(ctx context.Context, in *ValidateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateKeyspaceResponse, error)
@@ -145,9 +152,13 @@ type VTAdminClient interface {
// for shard 0 matches the schema on all of the other tablets in the
// keyspace.
ValidateSchemaKeyspace(ctx context.Context, in *ValidateSchemaKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateSchemaKeyspaceResponse, error)
+ // ValidateShard validates that that all nodes reachable from the specified shard are consistent.
+ ValidateShard(ctx context.Context, in *ValidateShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of
// shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(ctx context.Context, in *ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(ctx context.Context, in *ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error)
// VTExplain provides information on how Vitess plans to execute a
// particular query.
VTExplain(ctx context.Context, in *VTExplainRequest, opts ...grpc.CallOption) (*VTExplainResponse, error)
@@ -260,6 +271,15 @@ func (c *vTAdminClient) GetClusters(ctx context.Context, in *GetClustersRequest,
return out, nil
}
+func (c *vTAdminClient) GetFullStatus(ctx context.Context, in *GetFullStatusRequest, opts ...grpc.CallOption) (*vtctldata.GetFullStatusResponse, error) {
+ out := new(vtctldata.GetFullStatusResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetFullStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) GetGates(ctx context.Context, in *GetGatesRequest, opts ...grpc.CallOption) (*GetGatesResponse, error) {
out := new(GetGatesResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetGates", in, out, opts...)
@@ -350,6 +370,15 @@ func (c *vTAdminClient) GetTablets(ctx context.Context, in *GetTabletsRequest, o
return out, nil
}
+func (c *vTAdminClient) GetTopologyPath(ctx context.Context, in *GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error) {
+ out := new(vtctldata.GetTopologyPathResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetTopologyPath", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) GetVSchema(ctx context.Context, in *GetVSchemaRequest, opts ...grpc.CallOption) (*VSchema, error) {
out := new(VSchema)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetVSchema", in, out, opts...)
@@ -521,6 +550,15 @@ func (c *vTAdminClient) TabletExternallyPromoted(ctx context.Context, in *Tablet
return out, nil
}
+func (c *vTAdminClient) Validate(ctx context.Context, in *ValidateRequest, opts ...grpc.CallOption) (*vtctldata.ValidateResponse, error) {
+ out := new(vtctldata.ValidateResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/Validate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) ValidateKeyspace(ctx context.Context, in *ValidateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateKeyspaceResponse, error) {
out := new(vtctldata.ValidateKeyspaceResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateKeyspace", in, out, opts...)
@@ -539,6 +577,15 @@ func (c *vTAdminClient) ValidateSchemaKeyspace(ctx context.Context, in *Validate
return out, nil
}
+func (c *vTAdminClient) ValidateShard(ctx context.Context, in *ValidateShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateShardResponse, error) {
+ out := new(vtctldata.ValidateShardResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateShard", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) ValidateVersionKeyspace(ctx context.Context, in *ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionKeyspaceResponse, error) {
out := new(vtctldata.ValidateVersionKeyspaceResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateVersionKeyspace", in, out, opts...)
@@ -548,6 +595,15 @@ func (c *vTAdminClient) ValidateVersionKeyspace(ctx context.Context, in *Validat
return out, nil
}
+func (c *vTAdminClient) ValidateVersionShard(ctx context.Context, in *ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error) {
+ out := new(vtctldata.ValidateVersionShardResponse)
+ err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ValidateVersionShard", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vTAdminClient) VTExplain(ctx context.Context, in *VTExplainRequest, opts ...grpc.CallOption) (*VTExplainResponse, error) {
out := new(VTExplainResponse)
err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/VTExplain", in, out, opts...)
@@ -592,6 +648,8 @@ type VTAdminServer interface {
GetCellsAliases(context.Context, *GetCellsAliasesRequest) (*GetCellsAliasesResponse, error)
// GetClusters returns all configured clusters.
GetClusters(context.Context, *GetClustersRequest) (*GetClustersResponse, error)
+ // GetFullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
+ GetFullStatus(context.Context, *GetFullStatusRequest) (*vtctldata.GetFullStatusResponse, error)
// GetGates returns all gates across all the specified clusters.
GetGates(context.Context, *GetGatesRequest) (*GetGatesResponse, error)
// GetKeyspace returns a keyspace by name in the specified cluster.
@@ -616,6 +674,8 @@ type VTAdminServer interface {
GetTablet(context.Context, *GetTabletRequest) (*Tablet, error)
// GetTablets returns all tablets across all the specified clusters.
GetTablets(context.Context, *GetTabletsRequest) (*GetTabletsResponse, error)
+ // GetTopologyPath returns the cell located at the specified path in the topology server.
+ GetTopologyPath(context.Context, *GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error)
// GetVSchema returns a VSchema for the specified keyspace in the specified
// cluster.
GetVSchema(context.Context, *GetVSchemaRequest) (*VSchema, error)
@@ -676,6 +736,9 @@ type VTAdminServer interface {
// * "orchestrator" here refers to external orchestrator, not the newer,
// Vitess-aware orchestrator, VTOrc.
TabletExternallyPromoted(context.Context, *TabletExternallyPromotedRequest) (*TabletExternallyPromotedResponse, error)
+ // Validate validates all nodes in a cluster that are reachable from the global replication graph,
+ // as well as all tablets in discoverable cells, are consistent
+ Validate(context.Context, *ValidateRequest) (*vtctldata.ValidateResponse, error)
// ValidateKeyspace validates that all nodes reachable from the specified
// keyspace are consistent.
ValidateKeyspace(context.Context, *ValidateKeyspaceRequest) (*vtctldata.ValidateKeyspaceResponse, error)
@@ -683,9 +746,13 @@ type VTAdminServer interface {
// for shard 0 matches the schema on all of the other tablets in the
// keyspace.
ValidateSchemaKeyspace(context.Context, *ValidateSchemaKeyspaceRequest) (*vtctldata.ValidateSchemaKeyspaceResponse, error)
+ // ValidateShard validates that that all nodes reachable from the specified shard are consistent.
+ ValidateShard(context.Context, *ValidateShardRequest) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of
// shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(context.Context, *ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(context.Context, *ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error)
// VTExplain provides information on how Vitess plans to execute a
// particular query.
VTExplain(context.Context, *VTExplainRequest) (*VTExplainResponse, error)
@@ -729,6 +796,9 @@ func (UnimplementedVTAdminServer) GetCellsAliases(context.Context, *GetCellsAlia
func (UnimplementedVTAdminServer) GetClusters(context.Context, *GetClustersRequest) (*GetClustersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetClusters not implemented")
}
+func (UnimplementedVTAdminServer) GetFullStatus(context.Context, *GetFullStatusRequest) (*vtctldata.GetFullStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetFullStatus not implemented")
+}
func (UnimplementedVTAdminServer) GetGates(context.Context, *GetGatesRequest) (*GetGatesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetGates not implemented")
}
@@ -759,6 +829,9 @@ func (UnimplementedVTAdminServer) GetTablet(context.Context, *GetTabletRequest)
func (UnimplementedVTAdminServer) GetTablets(context.Context, *GetTabletsRequest) (*GetTabletsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTablets not implemented")
}
+func (UnimplementedVTAdminServer) GetTopologyPath(context.Context, *GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTopologyPath not implemented")
+}
func (UnimplementedVTAdminServer) GetVSchema(context.Context, *GetVSchemaRequest) (*VSchema, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVSchema not implemented")
}
@@ -816,15 +889,24 @@ func (UnimplementedVTAdminServer) StopReplication(context.Context, *StopReplicat
func (UnimplementedVTAdminServer) TabletExternallyPromoted(context.Context, *TabletExternallyPromotedRequest) (*TabletExternallyPromotedResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method TabletExternallyPromoted not implemented")
}
+func (UnimplementedVTAdminServer) Validate(context.Context, *ValidateRequest) (*vtctldata.ValidateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Validate not implemented")
+}
func (UnimplementedVTAdminServer) ValidateKeyspace(context.Context, *ValidateKeyspaceRequest) (*vtctldata.ValidateKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateKeyspace not implemented")
}
func (UnimplementedVTAdminServer) ValidateSchemaKeyspace(context.Context, *ValidateSchemaKeyspaceRequest) (*vtctldata.ValidateSchemaKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateSchemaKeyspace not implemented")
}
+func (UnimplementedVTAdminServer) ValidateShard(context.Context, *ValidateShardRequest) (*vtctldata.ValidateShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateShard not implemented")
+}
func (UnimplementedVTAdminServer) ValidateVersionKeyspace(context.Context, *ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionKeyspace not implemented")
}
+func (UnimplementedVTAdminServer) ValidateVersionShard(context.Context, *ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionShard not implemented")
+}
func (UnimplementedVTAdminServer) VTExplain(context.Context, *VTExplainRequest) (*VTExplainResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method VTExplain not implemented")
}
@@ -1039,6 +1121,24 @@ func _VTAdmin_GetClusters_Handler(srv interface{}, ctx context.Context, dec func
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_GetFullStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetFullStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).GetFullStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/GetFullStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).GetFullStatus(ctx, req.(*GetFullStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_GetGates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetGatesRequest)
if err := dec(in); err != nil {
@@ -1219,6 +1319,24 @@ func _VTAdmin_GetTablets_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_GetTopologyPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetTopologyPathRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).GetTopologyPath(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/GetTopologyPath",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).GetTopologyPath(ctx, req.(*GetTopologyPathRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_GetVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetVSchemaRequest)
if err := dec(in); err != nil {
@@ -1561,6 +1679,24 @@ func _VTAdmin_TabletExternallyPromoted_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_Validate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).Validate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/Validate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).Validate(ctx, req.(*ValidateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_ValidateKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValidateKeyspaceRequest)
if err := dec(in); err != nil {
@@ -1597,6 +1733,24 @@ func _VTAdmin_ValidateSchemaKeyspace_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_ValidateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidateShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).ValidateShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/ValidateShard",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).ValidateShard(ctx, req.(*ValidateShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_ValidateVersionKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ValidateVersionKeyspaceRequest)
if err := dec(in); err != nil {
@@ -1615,6 +1769,24 @@ func _VTAdmin_ValidateVersionKeyspace_Handler(srv interface{}, ctx context.Conte
return interceptor(ctx, in, info, handler)
}
+func _VTAdmin_ValidateVersionShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ValidateVersionShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VTAdminServer).ValidateVersionShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtadmin.VTAdmin/ValidateVersionShard",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VTAdminServer).ValidateVersionShard(ctx, req.(*ValidateVersionShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _VTAdmin_VTExplain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VTExplainRequest)
if err := dec(in); err != nil {
@@ -1684,6 +1856,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetClusters",
Handler: _VTAdmin_GetClusters_Handler,
},
+ {
+ MethodName: "GetFullStatus",
+ Handler: _VTAdmin_GetFullStatus_Handler,
+ },
{
MethodName: "GetGates",
Handler: _VTAdmin_GetGates_Handler,
@@ -1724,6 +1900,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetTablets",
Handler: _VTAdmin_GetTablets_Handler,
},
+ {
+ MethodName: "GetTopologyPath",
+ Handler: _VTAdmin_GetTopologyPath_Handler,
+ },
{
MethodName: "GetVSchema",
Handler: _VTAdmin_GetVSchema_Handler,
@@ -1800,6 +1980,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "TabletExternallyPromoted",
Handler: _VTAdmin_TabletExternallyPromoted_Handler,
},
+ {
+ MethodName: "Validate",
+ Handler: _VTAdmin_Validate_Handler,
+ },
{
MethodName: "ValidateKeyspace",
Handler: _VTAdmin_ValidateKeyspace_Handler,
@@ -1808,10 +1992,18 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{
MethodName: "ValidateSchemaKeyspace",
Handler: _VTAdmin_ValidateSchemaKeyspace_Handler,
},
+ {
+ MethodName: "ValidateShard",
+ Handler: _VTAdmin_ValidateShard_Handler,
+ },
{
MethodName: "ValidateVersionKeyspace",
Handler: _VTAdmin_ValidateVersionKeyspace_Handler,
},
+ {
+ MethodName: "ValidateVersionShard",
+ Handler: _VTAdmin_ValidateVersionShard_Handler,
+ },
{
MethodName: "VTExplain",
Handler: _VTAdmin_VTExplain_Handler,
diff --git a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go
index e95dc2e6e11..5e6fdcdd663 100644
--- a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go
+++ b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go
@@ -2001,6 +2001,56 @@ func (m *GetClustersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *GetFullStatusRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetFullStatusRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetFullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Alias != nil {
+ size, err := m.Alias.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *GetGatesRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -2823,6 +2873,53 @@ func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5006,6 +5103,56 @@ func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte)
return len(dAtA) - i, nil
}
+func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.PingTablets {
+ i--
+ if m.PingTablets {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5110,6 +5257,70 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int
return len(dAtA) - i, nil
}
+func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.PingTablets {
+ i--
+ if m.PingTablets {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -5157,6 +5368,60 @@ func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (in
return len(dAtA) - i, nil
}
+func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClusterId) > 0 {
+ i -= len(m.ClusterId)
+ copy(dAtA[i:], m.ClusterId)
+ i = encodeVarint(dAtA, i, uint64(len(m.ClusterId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *VTExplainRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -6093,19 +6358,39 @@ func (m *GetClustersResponse) SizeVT() (n int) {
return n
}
-func (m *GetGatesRequest) SizeVT() (n int) {
+func (m *GetFullStatusRequest) SizeVT() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if len(m.ClusterIds) > 0 {
- for _, s := range m.ClusterIds {
- l = len(s)
- n += 1 + l + sov(uint64(l))
- }
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
}
- if m.unknownFields != nil {
+ if m.Alias != nil {
+ l = m.Alias.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
+func (m *GetGatesRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ClusterIds) > 0 {
+ for _, s := range m.ClusterIds {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
+ if m.unknownFields != nil {
n += len(m.unknownFields)
}
return n
@@ -6439,6 +6724,26 @@ func (m *GetTabletsResponse) SizeVT() (n int) {
return n
}
+func (m *GetTopologyPathRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
func (m *GetVSchemaRequest) SizeVT() (n int) {
if m == nil {
return 0
@@ -7357,6 +7662,25 @@ func (m *TabletExternallyReparentedRequest) SizeVT() (n int) {
return n
}
+func (m *ValidateRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.PingTablets {
+ n += 2
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
func (m *ValidateKeyspaceRequest) SizeVT() (n int) {
if m == nil {
return 0
@@ -7400,6 +7724,33 @@ func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) {
return n
}
+func (m *ValidateShardRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Shard)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.PingTablets {
+ n += 2
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) {
if m == nil {
return 0
@@ -7420,6 +7771,30 @@ func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) {
return n
}
+func (m *ValidateVersionShardRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClusterId)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Shard)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
func (m *VTExplainRequest) SizeVT() (n int) {
if m == nil {
return 0
@@ -12484,6 +12859,125 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alias == nil {
+ m.Alias = &topodata.TabletAlias{}
+ }
+ if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -14247,7 +14741,7 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -14270,10 +14764,10 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -14310,7 +14804,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -14338,7 +14832,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Keyspace = string(dAtA[iNdEx:postIndex])
+ m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -14362,7 +14856,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetVSchemasRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -14385,15 +14879,15 @@ func (m *GetVSchemasRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetVSchemasRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -14421,23 +14915,138 @@ func (m *GetVSchemasRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex]))
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
}
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetVSchemasRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetVSchemasRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
}
if iNdEx > l {
@@ -19438,6 +20047,109 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PingTablets = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -19688,7 +20400,7 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
+func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -19711,10 +20423,10 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: ValidateShardRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ValidateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -19781,6 +20493,320 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
}
m.Keyspace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PingTablets = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go
index 85617f9a409..a86d51c545f 100644
--- a/go/vt/proto/vtctldata/vtctldata.pb.go
+++ b/go/vt/proto/vtctldata/vtctldata.pb.go
@@ -1335,6 +1335,8 @@ type BackupRequest struct {
// Concurrency specifies the number of compression/checksum jobs to run
// simultaneously.
Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
+ // BackupEngine specifies if we want to use a particular backup engine for this backup request
+ BackupEngine *string `protobuf:"bytes,6,opt,name=backup_engine,json=backupEngine,proto3,oneof" json:"backup_engine,omitempty"`
}
func (x *BackupRequest) Reset() {
@@ -1390,6 +1392,13 @@ func (x *BackupRequest) GetConcurrency() uint64 {
return 0
}
+func (x *BackupRequest) GetBackupEngine() string {
+ if x != nil && x.BackupEngine != nil {
+ return *x.BackupEngine
+ }
+ return ""
+}
+
type BackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2578,6 +2587,9 @@ type EmergencyReparentShardRequest struct {
// PreventCrossCellPromotion is used to only promote the new primary from the same cell
// as the failed primary.
PreventCrossCellPromotion bool `protobuf:"varint,6,opt,name=prevent_cross_cell_promotion,json=preventCrossCellPromotion,proto3" json:"prevent_cross_cell_promotion,omitempty"`
+ // ExpectedPrimary is the optional alias we expect to be the current primary in order for
+ // the reparent operation to succeed.
+ ExpectedPrimary *topodata.TabletAlias `protobuf:"bytes,8,opt,name=expected_primary,json=expectedPrimary,proto3" json:"expected_primary,omitempty"`
}
func (x *EmergencyReparentShardRequest) Reset() {
@@ -2654,6 +2666,13 @@ func (x *EmergencyReparentShardRequest) GetPreventCrossCellPromotion() bool {
return false
}
+func (x *EmergencyReparentShardRequest) GetExpectedPrimary() *topodata.TabletAlias {
+ if x != nil {
+ return x.ExpectedPrimary
+ }
+ return nil
+}
+
type EmergencyReparentShardResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5015,6 +5034,173 @@ func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet {
return nil
}
+type GetTopologyPathRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *GetTopologyPathRequest) Reset() {
+ *x = GetTopologyPathRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[86]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopologyPathRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopologyPathRequest) ProtoMessage() {}
+
+func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[86]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead.
+func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{86}
+}
+
+func (x *GetTopologyPathRequest) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+type GetTopologyPathResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Cell *TopologyCell `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"`
+}
+
+func (x *GetTopologyPathResponse) Reset() {
+ *x = GetTopologyPathResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[87]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTopologyPathResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTopologyPathResponse) ProtoMessage() {}
+
+func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[87]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTopologyPathResponse.ProtoReflect.Descriptor instead.
+func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{87}
+}
+
+func (x *GetTopologyPathResponse) GetCell() *TopologyCell {
+ if x != nil {
+ return x.Cell
+ }
+ return nil
+}
+
+type TopologyCell struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // Data is the file contents of the cell located at path.
+ // It is only populated if the cell is a terminal node.
+ Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+ Children []string `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"`
+}
+
+func (x *TopologyCell) Reset() {
+ *x = TopologyCell{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[88]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TopologyCell) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TopologyCell) ProtoMessage() {}
+
+func (x *TopologyCell) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[88]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead.
+func (*TopologyCell) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{88}
+}
+
+func (x *TopologyCell) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *TopologyCell) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *TopologyCell) GetData() string {
+ if x != nil {
+ return x.Data
+ }
+ return ""
+}
+
+func (x *TopologyCell) GetChildren() []string {
+ if x != nil {
+ return x.Children
+ }
+ return nil
+}
+
type GetVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5026,7 +5212,7 @@ type GetVSchemaRequest struct {
func (x *GetVSchemaRequest) Reset() {
*x = GetVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[86]
+ mi := &file_vtctldata_proto_msgTypes[89]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5039,7 +5225,7 @@ func (x *GetVSchemaRequest) String() string {
func (*GetVSchemaRequest) ProtoMessage() {}
func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[86]
+ mi := &file_vtctldata_proto_msgTypes[89]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5052,7 +5238,7 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead.
func (*GetVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{86}
+ return file_vtctldata_proto_rawDescGZIP(), []int{89}
}
func (x *GetVSchemaRequest) GetKeyspace() string {
@@ -5073,7 +5259,7 @@ type GetVersionRequest struct {
func (x *GetVersionRequest) Reset() {
*x = GetVersionRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[87]
+ mi := &file_vtctldata_proto_msgTypes[90]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5086,7 +5272,7 @@ func (x *GetVersionRequest) String() string {
func (*GetVersionRequest) ProtoMessage() {}
func (x *GetVersionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[87]
+ mi := &file_vtctldata_proto_msgTypes[90]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5099,7 +5285,7 @@ func (x *GetVersionRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead.
func (*GetVersionRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{87}
+ return file_vtctldata_proto_rawDescGZIP(), []int{90}
}
func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -5120,7 +5306,7 @@ type GetVersionResponse struct {
func (x *GetVersionResponse) Reset() {
*x = GetVersionResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[88]
+ mi := &file_vtctldata_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5133,7 +5319,7 @@ func (x *GetVersionResponse) String() string {
func (*GetVersionResponse) ProtoMessage() {}
func (x *GetVersionResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[88]
+ mi := &file_vtctldata_proto_msgTypes[91]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5146,7 +5332,7 @@ func (x *GetVersionResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead.
func (*GetVersionResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{88}
+ return file_vtctldata_proto_rawDescGZIP(), []int{91}
}
func (x *GetVersionResponse) GetVersion() string {
@@ -5167,7 +5353,7 @@ type GetVSchemaResponse struct {
func (x *GetVSchemaResponse) Reset() {
*x = GetVSchemaResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[89]
+ mi := &file_vtctldata_proto_msgTypes[92]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5180,7 +5366,7 @@ func (x *GetVSchemaResponse) String() string {
func (*GetVSchemaResponse) ProtoMessage() {}
func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[89]
+ mi := &file_vtctldata_proto_msgTypes[92]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5193,7 +5379,7 @@ func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead.
func (*GetVSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{89}
+ return file_vtctldata_proto_rawDescGZIP(), []int{92}
}
func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace {
@@ -5215,7 +5401,7 @@ type GetWorkflowsRequest struct {
func (x *GetWorkflowsRequest) Reset() {
*x = GetWorkflowsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[90]
+ mi := &file_vtctldata_proto_msgTypes[93]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5228,7 +5414,7 @@ func (x *GetWorkflowsRequest) String() string {
func (*GetWorkflowsRequest) ProtoMessage() {}
func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[90]
+ mi := &file_vtctldata_proto_msgTypes[93]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5241,7 +5427,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{90}
+ return file_vtctldata_proto_rawDescGZIP(), []int{93}
}
func (x *GetWorkflowsRequest) GetKeyspace() string {
@@ -5269,7 +5455,7 @@ type GetWorkflowsResponse struct {
func (x *GetWorkflowsResponse) Reset() {
*x = GetWorkflowsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[91]
+ mi := &file_vtctldata_proto_msgTypes[94]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5282,7 +5468,7 @@ func (x *GetWorkflowsResponse) String() string {
func (*GetWorkflowsResponse) ProtoMessage() {}
func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[91]
+ mi := &file_vtctldata_proto_msgTypes[94]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5295,7 +5481,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead.
func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{91}
+ return file_vtctldata_proto_rawDescGZIP(), []int{94}
}
func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow {
@@ -5320,7 +5506,7 @@ type InitShardPrimaryRequest struct {
func (x *InitShardPrimaryRequest) Reset() {
*x = InitShardPrimaryRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[92]
+ mi := &file_vtctldata_proto_msgTypes[95]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5333,7 +5519,7 @@ func (x *InitShardPrimaryRequest) String() string {
func (*InitShardPrimaryRequest) ProtoMessage() {}
func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[92]
+ mi := &file_vtctldata_proto_msgTypes[95]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5346,7 +5532,7 @@ func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead.
func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{92}
+ return file_vtctldata_proto_rawDescGZIP(), []int{95}
}
func (x *InitShardPrimaryRequest) GetKeyspace() string {
@@ -5395,7 +5581,7 @@ type InitShardPrimaryResponse struct {
func (x *InitShardPrimaryResponse) Reset() {
*x = InitShardPrimaryResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[93]
+ mi := &file_vtctldata_proto_msgTypes[96]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5408,7 +5594,7 @@ func (x *InitShardPrimaryResponse) String() string {
func (*InitShardPrimaryResponse) ProtoMessage() {}
func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[93]
+ mi := &file_vtctldata_proto_msgTypes[96]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5421,7 +5607,7 @@ func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead.
func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{93}
+ return file_vtctldata_proto_rawDescGZIP(), []int{96}
}
func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event {
@@ -5442,7 +5628,7 @@ type PingTabletRequest struct {
func (x *PingTabletRequest) Reset() {
*x = PingTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[94]
+ mi := &file_vtctldata_proto_msgTypes[97]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5455,7 +5641,7 @@ func (x *PingTabletRequest) String() string {
func (*PingTabletRequest) ProtoMessage() {}
func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[94]
+ mi := &file_vtctldata_proto_msgTypes[97]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5468,7 +5654,7 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead.
func (*PingTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{94}
+ return file_vtctldata_proto_rawDescGZIP(), []int{97}
}
func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -5487,7 +5673,7 @@ type PingTabletResponse struct {
func (x *PingTabletResponse) Reset() {
*x = PingTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[95]
+ mi := &file_vtctldata_proto_msgTypes[98]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5500,7 +5686,7 @@ func (x *PingTabletResponse) String() string {
func (*PingTabletResponse) ProtoMessage() {}
func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[95]
+ mi := &file_vtctldata_proto_msgTypes[98]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5513,7 +5699,7 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead.
func (*PingTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{95}
+ return file_vtctldata_proto_rawDescGZIP(), []int{98}
}
type PlannedReparentShardRequest struct {
@@ -5543,12 +5729,15 @@ type PlannedReparentShardRequest struct {
// WaitReplicasTimeout time to catch up before the reparent, and an additional
// WaitReplicasTimeout time to catch up after the reparent.
WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"`
+ // ExpectedPrimary is the optional alias we expect to be the current primary in order for
+ // the reparent operation to succeed.
+ ExpectedPrimary *topodata.TabletAlias `protobuf:"bytes,8,opt,name=expected_primary,json=expectedPrimary,proto3" json:"expected_primary,omitempty"`
}
func (x *PlannedReparentShardRequest) Reset() {
*x = PlannedReparentShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[96]
+ mi := &file_vtctldata_proto_msgTypes[99]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5561,7 +5750,7 @@ func (x *PlannedReparentShardRequest) String() string {
func (*PlannedReparentShardRequest) ProtoMessage() {}
func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[96]
+ mi := &file_vtctldata_proto_msgTypes[99]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5574,7 +5763,7 @@ func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead.
func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{96}
+ return file_vtctldata_proto_rawDescGZIP(), []int{99}
}
func (x *PlannedReparentShardRequest) GetKeyspace() string {
@@ -5612,6 +5801,13 @@ func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration
return nil
}
+func (x *PlannedReparentShardRequest) GetExpectedPrimary() *topodata.TabletAlias {
+ if x != nil {
+ return x.ExpectedPrimary
+ }
+ return nil
+}
+
type PlannedReparentShardResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -5632,7 +5828,7 @@ type PlannedReparentShardResponse struct {
func (x *PlannedReparentShardResponse) Reset() {
*x = PlannedReparentShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[97]
+ mi := &file_vtctldata_proto_msgTypes[100]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5645,7 +5841,7 @@ func (x *PlannedReparentShardResponse) String() string {
func (*PlannedReparentShardResponse) ProtoMessage() {}
func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[97]
+ mi := &file_vtctldata_proto_msgTypes[100]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5658,7 +5854,7 @@ func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead.
func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{97}
+ return file_vtctldata_proto_rawDescGZIP(), []int{100}
}
func (x *PlannedReparentShardResponse) GetKeyspace() string {
@@ -5704,7 +5900,7 @@ type RebuildKeyspaceGraphRequest struct {
func (x *RebuildKeyspaceGraphRequest) Reset() {
*x = RebuildKeyspaceGraphRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[98]
+ mi := &file_vtctldata_proto_msgTypes[101]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5717,7 +5913,7 @@ func (x *RebuildKeyspaceGraphRequest) String() string {
func (*RebuildKeyspaceGraphRequest) ProtoMessage() {}
func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[98]
+ mi := &file_vtctldata_proto_msgTypes[101]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5730,7 +5926,7 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{98}
+ return file_vtctldata_proto_rawDescGZIP(), []int{101}
}
func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string {
@@ -5763,7 +5959,7 @@ type RebuildKeyspaceGraphResponse struct {
func (x *RebuildKeyspaceGraphResponse) Reset() {
*x = RebuildKeyspaceGraphResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[99]
+ mi := &file_vtctldata_proto_msgTypes[102]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5776,7 +5972,7 @@ func (x *RebuildKeyspaceGraphResponse) String() string {
func (*RebuildKeyspaceGraphResponse) ProtoMessage() {}
func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[99]
+ mi := &file_vtctldata_proto_msgTypes[102]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5789,7 +5985,7 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead.
func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{99}
+ return file_vtctldata_proto_rawDescGZIP(), []int{102}
}
type RebuildVSchemaGraphRequest struct {
@@ -5805,7 +6001,7 @@ type RebuildVSchemaGraphRequest struct {
func (x *RebuildVSchemaGraphRequest) Reset() {
*x = RebuildVSchemaGraphRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[100]
+ mi := &file_vtctldata_proto_msgTypes[103]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5818,7 +6014,7 @@ func (x *RebuildVSchemaGraphRequest) String() string {
func (*RebuildVSchemaGraphRequest) ProtoMessage() {}
func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[100]
+ mi := &file_vtctldata_proto_msgTypes[103]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5831,7 +6027,7 @@ func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead.
func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{100}
+ return file_vtctldata_proto_rawDescGZIP(), []int{103}
}
func (x *RebuildVSchemaGraphRequest) GetCells() []string {
@@ -5850,7 +6046,7 @@ type RebuildVSchemaGraphResponse struct {
func (x *RebuildVSchemaGraphResponse) Reset() {
*x = RebuildVSchemaGraphResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[101]
+ mi := &file_vtctldata_proto_msgTypes[104]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5863,7 +6059,7 @@ func (x *RebuildVSchemaGraphResponse) String() string {
func (*RebuildVSchemaGraphResponse) ProtoMessage() {}
func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[101]
+ mi := &file_vtctldata_proto_msgTypes[104]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5876,7 +6072,7 @@ func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead.
func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{101}
+ return file_vtctldata_proto_rawDescGZIP(), []int{104}
}
type RefreshStateRequest struct {
@@ -5890,7 +6086,7 @@ type RefreshStateRequest struct {
func (x *RefreshStateRequest) Reset() {
*x = RefreshStateRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[102]
+ mi := &file_vtctldata_proto_msgTypes[105]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5903,7 +6099,7 @@ func (x *RefreshStateRequest) String() string {
func (*RefreshStateRequest) ProtoMessage() {}
func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[102]
+ mi := &file_vtctldata_proto_msgTypes[105]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5916,7 +6112,7 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead.
func (*RefreshStateRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{102}
+ return file_vtctldata_proto_rawDescGZIP(), []int{105}
}
func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -5935,7 +6131,7 @@ type RefreshStateResponse struct {
func (x *RefreshStateResponse) Reset() {
*x = RefreshStateResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[103]
+ mi := &file_vtctldata_proto_msgTypes[106]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5948,7 +6144,7 @@ func (x *RefreshStateResponse) String() string {
func (*RefreshStateResponse) ProtoMessage() {}
func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[103]
+ mi := &file_vtctldata_proto_msgTypes[106]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5961,7 +6157,7 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead.
func (*RefreshStateResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{103}
+ return file_vtctldata_proto_rawDescGZIP(), []int{106}
}
type RefreshStateByShardRequest struct {
@@ -5977,7 +6173,7 @@ type RefreshStateByShardRequest struct {
func (x *RefreshStateByShardRequest) Reset() {
*x = RefreshStateByShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[104]
+ mi := &file_vtctldata_proto_msgTypes[107]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5990,7 +6186,7 @@ func (x *RefreshStateByShardRequest) String() string {
func (*RefreshStateByShardRequest) ProtoMessage() {}
func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[104]
+ mi := &file_vtctldata_proto_msgTypes[107]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6003,7 +6199,7 @@ func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead.
func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{104}
+ return file_vtctldata_proto_rawDescGZIP(), []int{107}
}
func (x *RefreshStateByShardRequest) GetKeyspace() string {
@@ -6040,7 +6236,7 @@ type RefreshStateByShardResponse struct {
func (x *RefreshStateByShardResponse) Reset() {
*x = RefreshStateByShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[105]
+ mi := &file_vtctldata_proto_msgTypes[108]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6053,7 +6249,7 @@ func (x *RefreshStateByShardResponse) String() string {
func (*RefreshStateByShardResponse) ProtoMessage() {}
func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[105]
+ mi := &file_vtctldata_proto_msgTypes[108]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6066,7 +6262,7 @@ func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead.
func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{105}
+ return file_vtctldata_proto_rawDescGZIP(), []int{108}
}
func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool {
@@ -6094,7 +6290,7 @@ type ReloadSchemaRequest struct {
func (x *ReloadSchemaRequest) Reset() {
*x = ReloadSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[106]
+ mi := &file_vtctldata_proto_msgTypes[109]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6107,7 +6303,7 @@ func (x *ReloadSchemaRequest) String() string {
func (*ReloadSchemaRequest) ProtoMessage() {}
func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[106]
+ mi := &file_vtctldata_proto_msgTypes[109]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6120,7 +6316,7 @@ func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{106}
+ return file_vtctldata_proto_rawDescGZIP(), []int{109}
}
func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -6139,7 +6335,7 @@ type ReloadSchemaResponse struct {
func (x *ReloadSchemaResponse) Reset() {
*x = ReloadSchemaResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[107]
+ mi := &file_vtctldata_proto_msgTypes[110]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6152,7 +6348,7 @@ func (x *ReloadSchemaResponse) String() string {
func (*ReloadSchemaResponse) ProtoMessage() {}
func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[107]
+ mi := &file_vtctldata_proto_msgTypes[110]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6165,7 +6361,7 @@ func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{107}
+ return file_vtctldata_proto_rawDescGZIP(), []int{110}
}
type ReloadSchemaKeyspaceRequest struct {
@@ -6185,7 +6381,7 @@ type ReloadSchemaKeyspaceRequest struct {
func (x *ReloadSchemaKeyspaceRequest) Reset() {
*x = ReloadSchemaKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[108]
+ mi := &file_vtctldata_proto_msgTypes[111]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6198,7 +6394,7 @@ func (x *ReloadSchemaKeyspaceRequest) String() string {
func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {}
func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[108]
+ mi := &file_vtctldata_proto_msgTypes[111]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6211,7 +6407,7 @@ func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{108}
+ return file_vtctldata_proto_rawDescGZIP(), []int{111}
}
func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string {
@@ -6253,7 +6449,7 @@ type ReloadSchemaKeyspaceResponse struct {
func (x *ReloadSchemaKeyspaceResponse) Reset() {
*x = ReloadSchemaKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[109]
+ mi := &file_vtctldata_proto_msgTypes[112]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6266,7 +6462,7 @@ func (x *ReloadSchemaKeyspaceResponse) String() string {
func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {}
func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[109]
+ mi := &file_vtctldata_proto_msgTypes[112]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6279,7 +6475,7 @@ func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{109}
+ return file_vtctldata_proto_rawDescGZIP(), []int{112}
}
func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event {
@@ -6305,7 +6501,7 @@ type ReloadSchemaShardRequest struct {
func (x *ReloadSchemaShardRequest) Reset() {
*x = ReloadSchemaShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[110]
+ mi := &file_vtctldata_proto_msgTypes[113]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6318,7 +6514,7 @@ func (x *ReloadSchemaShardRequest) String() string {
func (*ReloadSchemaShardRequest) ProtoMessage() {}
func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[110]
+ mi := &file_vtctldata_proto_msgTypes[113]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6331,7 +6527,7 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{110}
+ return file_vtctldata_proto_rawDescGZIP(), []int{113}
}
func (x *ReloadSchemaShardRequest) GetKeyspace() string {
@@ -6380,7 +6576,7 @@ type ReloadSchemaShardResponse struct {
func (x *ReloadSchemaShardResponse) Reset() {
*x = ReloadSchemaShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[111]
+ mi := &file_vtctldata_proto_msgTypes[114]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6393,7 +6589,7 @@ func (x *ReloadSchemaShardResponse) String() string {
func (*ReloadSchemaShardResponse) ProtoMessage() {}
func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[111]
+ mi := &file_vtctldata_proto_msgTypes[114]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6406,7 +6602,7 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead.
func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{111}
+ return file_vtctldata_proto_rawDescGZIP(), []int{114}
}
func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event {
@@ -6429,7 +6625,7 @@ type RemoveBackupRequest struct {
func (x *RemoveBackupRequest) Reset() {
*x = RemoveBackupRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[112]
+ mi := &file_vtctldata_proto_msgTypes[115]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6442,7 +6638,7 @@ func (x *RemoveBackupRequest) String() string {
func (*RemoveBackupRequest) ProtoMessage() {}
func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[112]
+ mi := &file_vtctldata_proto_msgTypes[115]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6455,7 +6651,7 @@ func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead.
func (*RemoveBackupRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{112}
+ return file_vtctldata_proto_rawDescGZIP(), []int{115}
}
func (x *RemoveBackupRequest) GetKeyspace() string {
@@ -6488,7 +6684,7 @@ type RemoveBackupResponse struct {
func (x *RemoveBackupResponse) Reset() {
*x = RemoveBackupResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[113]
+ mi := &file_vtctldata_proto_msgTypes[116]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6501,7 +6697,7 @@ func (x *RemoveBackupResponse) String() string {
func (*RemoveBackupResponse) ProtoMessage() {}
func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[113]
+ mi := &file_vtctldata_proto_msgTypes[116]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6514,7 +6710,7 @@ func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead.
func (*RemoveBackupResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{113}
+ return file_vtctldata_proto_rawDescGZIP(), []int{116}
}
type RemoveKeyspaceCellRequest struct {
@@ -6536,7 +6732,7 @@ type RemoveKeyspaceCellRequest struct {
func (x *RemoveKeyspaceCellRequest) Reset() {
*x = RemoveKeyspaceCellRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[114]
+ mi := &file_vtctldata_proto_msgTypes[117]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6549,7 +6745,7 @@ func (x *RemoveKeyspaceCellRequest) String() string {
func (*RemoveKeyspaceCellRequest) ProtoMessage() {}
func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[114]
+ mi := &file_vtctldata_proto_msgTypes[117]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6562,7 +6758,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{114}
+ return file_vtctldata_proto_rawDescGZIP(), []int{117}
}
func (x *RemoveKeyspaceCellRequest) GetKeyspace() string {
@@ -6602,7 +6798,7 @@ type RemoveKeyspaceCellResponse struct {
func (x *RemoveKeyspaceCellResponse) Reset() {
*x = RemoveKeyspaceCellResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[115]
+ mi := &file_vtctldata_proto_msgTypes[118]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6615,7 +6811,7 @@ func (x *RemoveKeyspaceCellResponse) String() string {
func (*RemoveKeyspaceCellResponse) ProtoMessage() {}
func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[115]
+ mi := &file_vtctldata_proto_msgTypes[118]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6628,7 +6824,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead.
func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{115}
+ return file_vtctldata_proto_rawDescGZIP(), []int{118}
}
type RemoveShardCellRequest struct {
@@ -6651,7 +6847,7 @@ type RemoveShardCellRequest struct {
func (x *RemoveShardCellRequest) Reset() {
*x = RemoveShardCellRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[116]
+ mi := &file_vtctldata_proto_msgTypes[119]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6664,7 +6860,7 @@ func (x *RemoveShardCellRequest) String() string {
func (*RemoveShardCellRequest) ProtoMessage() {}
func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[116]
+ mi := &file_vtctldata_proto_msgTypes[119]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6677,7 +6873,7 @@ func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead.
func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{116}
+ return file_vtctldata_proto_rawDescGZIP(), []int{119}
}
func (x *RemoveShardCellRequest) GetKeyspace() string {
@@ -6724,7 +6920,7 @@ type RemoveShardCellResponse struct {
func (x *RemoveShardCellResponse) Reset() {
*x = RemoveShardCellResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[117]
+ mi := &file_vtctldata_proto_msgTypes[120]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6737,7 +6933,7 @@ func (x *RemoveShardCellResponse) String() string {
func (*RemoveShardCellResponse) ProtoMessage() {}
func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[117]
+ mi := &file_vtctldata_proto_msgTypes[120]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6750,7 +6946,7 @@ func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead.
func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{117}
+ return file_vtctldata_proto_rawDescGZIP(), []int{120}
}
type ReparentTabletRequest struct {
@@ -6766,7 +6962,7 @@ type ReparentTabletRequest struct {
func (x *ReparentTabletRequest) Reset() {
*x = ReparentTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[118]
+ mi := &file_vtctldata_proto_msgTypes[121]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6779,7 +6975,7 @@ func (x *ReparentTabletRequest) String() string {
func (*ReparentTabletRequest) ProtoMessage() {}
func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[118]
+ mi := &file_vtctldata_proto_msgTypes[121]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6792,7 +6988,7 @@ func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead.
func (*ReparentTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{118}
+ return file_vtctldata_proto_rawDescGZIP(), []int{121}
}
func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias {
@@ -6818,7 +7014,7 @@ type ReparentTabletResponse struct {
func (x *ReparentTabletResponse) Reset() {
*x = ReparentTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[119]
+ mi := &file_vtctldata_proto_msgTypes[122]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6831,7 +7027,7 @@ func (x *ReparentTabletResponse) String() string {
func (*ReparentTabletResponse) ProtoMessage() {}
func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[119]
+ mi := &file_vtctldata_proto_msgTypes[122]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6844,7 +7040,7 @@ func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead.
func (*ReparentTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{119}
+ return file_vtctldata_proto_rawDescGZIP(), []int{122}
}
func (x *ReparentTabletResponse) GetKeyspace() string {
@@ -6877,12 +7073,14 @@ type RestoreFromBackupRequest struct {
// BackupTime, if set, will use the backup taken most closely at or before
// this time. If nil, the latest backup will be restored on the tablet.
BackupTime *vttime.Time `protobuf:"bytes,2,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"`
+ // AllowedBackupEngines, if present will filter out any backups taken with engines not included in the list
+ AllowedBackupEngines []string `protobuf:"bytes,6,rep,name=allowed_backup_engines,json=allowedBackupEngines,proto3" json:"allowed_backup_engines,omitempty"`
}
func (x *RestoreFromBackupRequest) Reset() {
*x = RestoreFromBackupRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[120]
+ mi := &file_vtctldata_proto_msgTypes[123]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6895,7 +7093,7 @@ func (x *RestoreFromBackupRequest) String() string {
func (*RestoreFromBackupRequest) ProtoMessage() {}
func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[120]
+ mi := &file_vtctldata_proto_msgTypes[123]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6908,7 +7106,7 @@ func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead.
func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{120}
+ return file_vtctldata_proto_rawDescGZIP(), []int{123}
}
func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -6925,6 +7123,13 @@ func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time {
return nil
}
+func (x *RestoreFromBackupRequest) GetAllowedBackupEngines() []string {
+ if x != nil {
+ return x.AllowedBackupEngines
+ }
+ return nil
+}
+
type RestoreFromBackupResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -6940,7 +7145,7 @@ type RestoreFromBackupResponse struct {
func (x *RestoreFromBackupResponse) Reset() {
*x = RestoreFromBackupResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[121]
+ mi := &file_vtctldata_proto_msgTypes[124]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -6953,7 +7158,7 @@ func (x *RestoreFromBackupResponse) String() string {
func (*RestoreFromBackupResponse) ProtoMessage() {}
func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[121]
+ mi := &file_vtctldata_proto_msgTypes[124]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6966,7 +7171,7 @@ func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead.
func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{121}
+ return file_vtctldata_proto_rawDescGZIP(), []int{124}
}
func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias {
@@ -7008,7 +7213,7 @@ type RunHealthCheckRequest struct {
func (x *RunHealthCheckRequest) Reset() {
*x = RunHealthCheckRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[122]
+ mi := &file_vtctldata_proto_msgTypes[125]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7021,7 +7226,7 @@ func (x *RunHealthCheckRequest) String() string {
func (*RunHealthCheckRequest) ProtoMessage() {}
func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[122]
+ mi := &file_vtctldata_proto_msgTypes[125]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7034,7 +7239,7 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead.
func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{122}
+ return file_vtctldata_proto_rawDescGZIP(), []int{125}
}
func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -7053,7 +7258,7 @@ type RunHealthCheckResponse struct {
func (x *RunHealthCheckResponse) Reset() {
*x = RunHealthCheckResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[123]
+ mi := &file_vtctldata_proto_msgTypes[126]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7066,7 +7271,7 @@ func (x *RunHealthCheckResponse) String() string {
func (*RunHealthCheckResponse) ProtoMessage() {}
func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[123]
+ mi := &file_vtctldata_proto_msgTypes[126]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7079,7 +7284,7 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead.
func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{123}
+ return file_vtctldata_proto_rawDescGZIP(), []int{126}
}
type SetKeyspaceDurabilityPolicyRequest struct {
@@ -7094,7 +7299,7 @@ type SetKeyspaceDurabilityPolicyRequest struct {
func (x *SetKeyspaceDurabilityPolicyRequest) Reset() {
*x = SetKeyspaceDurabilityPolicyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[124]
+ mi := &file_vtctldata_proto_msgTypes[127]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7107,7 +7312,7 @@ func (x *SetKeyspaceDurabilityPolicyRequest) String() string {
func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {}
func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[124]
+ mi := &file_vtctldata_proto_msgTypes[127]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7120,7 +7325,7 @@ func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead.
func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{124}
+ return file_vtctldata_proto_rawDescGZIP(), []int{127}
}
func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string {
@@ -7149,7 +7354,7 @@ type SetKeyspaceDurabilityPolicyResponse struct {
func (x *SetKeyspaceDurabilityPolicyResponse) Reset() {
*x = SetKeyspaceDurabilityPolicyResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[125]
+ mi := &file_vtctldata_proto_msgTypes[128]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7162,7 +7367,7 @@ func (x *SetKeyspaceDurabilityPolicyResponse) String() string {
func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {}
func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[125]
+ mi := &file_vtctldata_proto_msgTypes[128]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7175,7 +7380,7 @@ func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Messag
// Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead.
func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{125}
+ return file_vtctldata_proto_rawDescGZIP(), []int{128}
}
func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace {
@@ -7200,7 +7405,7 @@ type SetKeyspaceServedFromRequest struct {
func (x *SetKeyspaceServedFromRequest) Reset() {
*x = SetKeyspaceServedFromRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[126]
+ mi := &file_vtctldata_proto_msgTypes[129]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7213,7 +7418,7 @@ func (x *SetKeyspaceServedFromRequest) String() string {
func (*SetKeyspaceServedFromRequest) ProtoMessage() {}
func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[126]
+ mi := &file_vtctldata_proto_msgTypes[129]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7226,7 +7431,7 @@ func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead.
func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{126}
+ return file_vtctldata_proto_rawDescGZIP(), []int{129}
}
func (x *SetKeyspaceServedFromRequest) GetKeyspace() string {
@@ -7276,7 +7481,7 @@ type SetKeyspaceServedFromResponse struct {
func (x *SetKeyspaceServedFromResponse) Reset() {
*x = SetKeyspaceServedFromResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[127]
+ mi := &file_vtctldata_proto_msgTypes[130]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7289,7 +7494,7 @@ func (x *SetKeyspaceServedFromResponse) String() string {
func (*SetKeyspaceServedFromResponse) ProtoMessage() {}
func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[127]
+ mi := &file_vtctldata_proto_msgTypes[130]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7302,7 +7507,7 @@ func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead.
func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{127}
+ return file_vtctldata_proto_rawDescGZIP(), []int{130}
}
func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace {
@@ -7324,7 +7529,7 @@ type SetKeyspaceShardingInfoRequest struct {
func (x *SetKeyspaceShardingInfoRequest) Reset() {
*x = SetKeyspaceShardingInfoRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[128]
+ mi := &file_vtctldata_proto_msgTypes[131]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7337,7 +7542,7 @@ func (x *SetKeyspaceShardingInfoRequest) String() string {
func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {}
func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[128]
+ mi := &file_vtctldata_proto_msgTypes[131]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7350,7 +7555,7 @@ func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead.
func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{128}
+ return file_vtctldata_proto_rawDescGZIP(), []int{131}
}
func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string {
@@ -7379,7 +7584,7 @@ type SetKeyspaceShardingInfoResponse struct {
func (x *SetKeyspaceShardingInfoResponse) Reset() {
*x = SetKeyspaceShardingInfoResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[129]
+ mi := &file_vtctldata_proto_msgTypes[132]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7392,7 +7597,7 @@ func (x *SetKeyspaceShardingInfoResponse) String() string {
func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {}
func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[129]
+ mi := &file_vtctldata_proto_msgTypes[132]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7405,7 +7610,7 @@ func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead.
func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{129}
+ return file_vtctldata_proto_rawDescGZIP(), []int{132}
}
func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace {
@@ -7428,7 +7633,7 @@ type SetShardIsPrimaryServingRequest struct {
func (x *SetShardIsPrimaryServingRequest) Reset() {
*x = SetShardIsPrimaryServingRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[130]
+ mi := &file_vtctldata_proto_msgTypes[133]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7441,7 +7646,7 @@ func (x *SetShardIsPrimaryServingRequest) String() string {
func (*SetShardIsPrimaryServingRequest) ProtoMessage() {}
func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[130]
+ mi := &file_vtctldata_proto_msgTypes[133]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7454,7 +7659,7 @@ func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead.
func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{130}
+ return file_vtctldata_proto_rawDescGZIP(), []int{133}
}
func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string {
@@ -7490,7 +7695,7 @@ type SetShardIsPrimaryServingResponse struct {
func (x *SetShardIsPrimaryServingResponse) Reset() {
*x = SetShardIsPrimaryServingResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[131]
+ mi := &file_vtctldata_proto_msgTypes[134]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7503,7 +7708,7 @@ func (x *SetShardIsPrimaryServingResponse) String() string {
func (*SetShardIsPrimaryServingResponse) ProtoMessage() {}
func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[131]
+ mi := &file_vtctldata_proto_msgTypes[134]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7516,7 +7721,7 @@ func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead.
func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{131}
+ return file_vtctldata_proto_rawDescGZIP(), []int{134}
}
func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard {
@@ -7557,7 +7762,7 @@ type SetShardTabletControlRequest struct {
func (x *SetShardTabletControlRequest) Reset() {
*x = SetShardTabletControlRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[132]
+ mi := &file_vtctldata_proto_msgTypes[135]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7570,7 +7775,7 @@ func (x *SetShardTabletControlRequest) String() string {
func (*SetShardTabletControlRequest) ProtoMessage() {}
func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[132]
+ mi := &file_vtctldata_proto_msgTypes[135]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7583,7 +7788,7 @@ func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead.
func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{132}
+ return file_vtctldata_proto_rawDescGZIP(), []int{135}
}
func (x *SetShardTabletControlRequest) GetKeyspace() string {
@@ -7647,7 +7852,7 @@ type SetShardTabletControlResponse struct {
func (x *SetShardTabletControlResponse) Reset() {
*x = SetShardTabletControlResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[133]
+ mi := &file_vtctldata_proto_msgTypes[136]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7660,7 +7865,7 @@ func (x *SetShardTabletControlResponse) String() string {
func (*SetShardTabletControlResponse) ProtoMessage() {}
func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[133]
+ mi := &file_vtctldata_proto_msgTypes[136]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7673,7 +7878,7 @@ func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead.
func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{133}
+ return file_vtctldata_proto_rawDescGZIP(), []int{136}
}
func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard {
@@ -7695,7 +7900,7 @@ type SetWritableRequest struct {
func (x *SetWritableRequest) Reset() {
*x = SetWritableRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[134]
+ mi := &file_vtctldata_proto_msgTypes[137]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7708,7 +7913,7 @@ func (x *SetWritableRequest) String() string {
func (*SetWritableRequest) ProtoMessage() {}
func (x *SetWritableRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[134]
+ mi := &file_vtctldata_proto_msgTypes[137]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7721,7 +7926,7 @@ func (x *SetWritableRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead.
func (*SetWritableRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{134}
+ return file_vtctldata_proto_rawDescGZIP(), []int{137}
}
func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -7747,7 +7952,7 @@ type SetWritableResponse struct {
func (x *SetWritableResponse) Reset() {
*x = SetWritableResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[135]
+ mi := &file_vtctldata_proto_msgTypes[138]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7760,7 +7965,7 @@ func (x *SetWritableResponse) String() string {
func (*SetWritableResponse) ProtoMessage() {}
func (x *SetWritableResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[135]
+ mi := &file_vtctldata_proto_msgTypes[138]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7773,7 +7978,7 @@ func (x *SetWritableResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead.
func (*SetWritableResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{135}
+ return file_vtctldata_proto_rawDescGZIP(), []int{138}
}
type ShardReplicationAddRequest struct {
@@ -7789,7 +7994,7 @@ type ShardReplicationAddRequest struct {
func (x *ShardReplicationAddRequest) Reset() {
*x = ShardReplicationAddRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[136]
+ mi := &file_vtctldata_proto_msgTypes[139]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7802,7 +8007,7 @@ func (x *ShardReplicationAddRequest) String() string {
func (*ShardReplicationAddRequest) ProtoMessage() {}
func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[136]
+ mi := &file_vtctldata_proto_msgTypes[139]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7815,7 +8020,7 @@ func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationAddRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{136}
+ return file_vtctldata_proto_rawDescGZIP(), []int{139}
}
func (x *ShardReplicationAddRequest) GetKeyspace() string {
@@ -7848,7 +8053,7 @@ type ShardReplicationAddResponse struct {
func (x *ShardReplicationAddResponse) Reset() {
*x = ShardReplicationAddResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[137]
+ mi := &file_vtctldata_proto_msgTypes[140]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7861,7 +8066,7 @@ func (x *ShardReplicationAddResponse) String() string {
func (*ShardReplicationAddResponse) ProtoMessage() {}
func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[137]
+ mi := &file_vtctldata_proto_msgTypes[140]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7874,7 +8079,7 @@ func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{137}
+ return file_vtctldata_proto_rawDescGZIP(), []int{140}
}
type ShardReplicationFixRequest struct {
@@ -7890,7 +8095,7 @@ type ShardReplicationFixRequest struct {
func (x *ShardReplicationFixRequest) Reset() {
*x = ShardReplicationFixRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[138]
+ mi := &file_vtctldata_proto_msgTypes[141]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7903,7 +8108,7 @@ func (x *ShardReplicationFixRequest) String() string {
func (*ShardReplicationFixRequest) ProtoMessage() {}
func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[138]
+ mi := &file_vtctldata_proto_msgTypes[141]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7916,7 +8121,7 @@ func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{138}
+ return file_vtctldata_proto_rawDescGZIP(), []int{141}
}
func (x *ShardReplicationFixRequest) GetKeyspace() string {
@@ -7954,7 +8159,7 @@ type ShardReplicationFixResponse struct {
func (x *ShardReplicationFixResponse) Reset() {
*x = ShardReplicationFixResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[139]
+ mi := &file_vtctldata_proto_msgTypes[142]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -7967,7 +8172,7 @@ func (x *ShardReplicationFixResponse) String() string {
func (*ShardReplicationFixResponse) ProtoMessage() {}
func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[139]
+ mi := &file_vtctldata_proto_msgTypes[142]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7980,7 +8185,7 @@ func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{139}
+ return file_vtctldata_proto_rawDescGZIP(), []int{142}
}
func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError {
@@ -8002,7 +8207,7 @@ type ShardReplicationPositionsRequest struct {
func (x *ShardReplicationPositionsRequest) Reset() {
*x = ShardReplicationPositionsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[140]
+ mi := &file_vtctldata_proto_msgTypes[143]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8015,7 +8220,7 @@ func (x *ShardReplicationPositionsRequest) String() string {
func (*ShardReplicationPositionsRequest) ProtoMessage() {}
func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[140]
+ mi := &file_vtctldata_proto_msgTypes[143]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8028,7 +8233,7 @@ func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{140}
+ return file_vtctldata_proto_rawDescGZIP(), []int{143}
}
func (x *ShardReplicationPositionsRequest) GetKeyspace() string {
@@ -8061,7 +8266,7 @@ type ShardReplicationPositionsResponse struct {
func (x *ShardReplicationPositionsResponse) Reset() {
*x = ShardReplicationPositionsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[141]
+ mi := &file_vtctldata_proto_msgTypes[144]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8074,7 +8279,7 @@ func (x *ShardReplicationPositionsResponse) String() string {
func (*ShardReplicationPositionsResponse) ProtoMessage() {}
func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[141]
+ mi := &file_vtctldata_proto_msgTypes[144]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8087,7 +8292,7 @@ func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message
// Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{141}
+ return file_vtctldata_proto_rawDescGZIP(), []int{144}
}
func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status {
@@ -8117,7 +8322,7 @@ type ShardReplicationRemoveRequest struct {
func (x *ShardReplicationRemoveRequest) Reset() {
*x = ShardReplicationRemoveRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[142]
+ mi := &file_vtctldata_proto_msgTypes[145]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8130,7 +8335,7 @@ func (x *ShardReplicationRemoveRequest) String() string {
func (*ShardReplicationRemoveRequest) ProtoMessage() {}
func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[142]
+ mi := &file_vtctldata_proto_msgTypes[145]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8143,7 +8348,7 @@ func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead.
func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{142}
+ return file_vtctldata_proto_rawDescGZIP(), []int{145}
}
func (x *ShardReplicationRemoveRequest) GetKeyspace() string {
@@ -8176,7 +8381,7 @@ type ShardReplicationRemoveResponse struct {
func (x *ShardReplicationRemoveResponse) Reset() {
*x = ShardReplicationRemoveResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[143]
+ mi := &file_vtctldata_proto_msgTypes[146]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8189,7 +8394,7 @@ func (x *ShardReplicationRemoveResponse) String() string {
func (*ShardReplicationRemoveResponse) ProtoMessage() {}
func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[143]
+ mi := &file_vtctldata_proto_msgTypes[146]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8202,7 +8407,7 @@ func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead.
func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{143}
+ return file_vtctldata_proto_rawDescGZIP(), []int{146}
}
type SleepTabletRequest struct {
@@ -8217,7 +8422,7 @@ type SleepTabletRequest struct {
func (x *SleepTabletRequest) Reset() {
*x = SleepTabletRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[144]
+ mi := &file_vtctldata_proto_msgTypes[147]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8230,7 +8435,7 @@ func (x *SleepTabletRequest) String() string {
func (*SleepTabletRequest) ProtoMessage() {}
func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[144]
+ mi := &file_vtctldata_proto_msgTypes[147]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8243,7 +8448,7 @@ func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead.
func (*SleepTabletRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{144}
+ return file_vtctldata_proto_rawDescGZIP(), []int{147}
}
func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -8269,7 +8474,7 @@ type SleepTabletResponse struct {
func (x *SleepTabletResponse) Reset() {
*x = SleepTabletResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[145]
+ mi := &file_vtctldata_proto_msgTypes[148]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8282,7 +8487,7 @@ func (x *SleepTabletResponse) String() string {
func (*SleepTabletResponse) ProtoMessage() {}
func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[145]
+ mi := &file_vtctldata_proto_msgTypes[148]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8295,7 +8500,7 @@ func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead.
func (*SleepTabletResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{145}
+ return file_vtctldata_proto_rawDescGZIP(), []int{148}
}
type SourceShardAddRequest struct {
@@ -8319,7 +8524,7 @@ type SourceShardAddRequest struct {
func (x *SourceShardAddRequest) Reset() {
*x = SourceShardAddRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[146]
+ mi := &file_vtctldata_proto_msgTypes[149]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8332,7 +8537,7 @@ func (x *SourceShardAddRequest) String() string {
func (*SourceShardAddRequest) ProtoMessage() {}
func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[146]
+ mi := &file_vtctldata_proto_msgTypes[149]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8345,7 +8550,7 @@ func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead.
func (*SourceShardAddRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{146}
+ return file_vtctldata_proto_rawDescGZIP(), []int{149}
}
func (x *SourceShardAddRequest) GetKeyspace() string {
@@ -8409,7 +8614,7 @@ type SourceShardAddResponse struct {
func (x *SourceShardAddResponse) Reset() {
*x = SourceShardAddResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[147]
+ mi := &file_vtctldata_proto_msgTypes[150]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8422,7 +8627,7 @@ func (x *SourceShardAddResponse) String() string {
func (*SourceShardAddResponse) ProtoMessage() {}
func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[147]
+ mi := &file_vtctldata_proto_msgTypes[150]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8435,7 +8640,7 @@ func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead.
func (*SourceShardAddResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{147}
+ return file_vtctldata_proto_rawDescGZIP(), []int{150}
}
func (x *SourceShardAddResponse) GetShard() *topodata.Shard {
@@ -8458,7 +8663,7 @@ type SourceShardDeleteRequest struct {
func (x *SourceShardDeleteRequest) Reset() {
*x = SourceShardDeleteRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[148]
+ mi := &file_vtctldata_proto_msgTypes[151]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8471,7 +8676,7 @@ func (x *SourceShardDeleteRequest) String() string {
func (*SourceShardDeleteRequest) ProtoMessage() {}
func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[148]
+ mi := &file_vtctldata_proto_msgTypes[151]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8484,7 +8689,7 @@ func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead.
func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{148}
+ return file_vtctldata_proto_rawDescGZIP(), []int{151}
}
func (x *SourceShardDeleteRequest) GetKeyspace() string {
@@ -8520,7 +8725,7 @@ type SourceShardDeleteResponse struct {
func (x *SourceShardDeleteResponse) Reset() {
*x = SourceShardDeleteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[149]
+ mi := &file_vtctldata_proto_msgTypes[152]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8533,7 +8738,7 @@ func (x *SourceShardDeleteResponse) String() string {
func (*SourceShardDeleteResponse) ProtoMessage() {}
func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[149]
+ mi := &file_vtctldata_proto_msgTypes[152]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8546,7 +8751,7 @@ func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead.
func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{149}
+ return file_vtctldata_proto_rawDescGZIP(), []int{152}
}
func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard {
@@ -8567,7 +8772,7 @@ type StartReplicationRequest struct {
func (x *StartReplicationRequest) Reset() {
*x = StartReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[150]
+ mi := &file_vtctldata_proto_msgTypes[153]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8580,7 +8785,7 @@ func (x *StartReplicationRequest) String() string {
func (*StartReplicationRequest) ProtoMessage() {}
func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[150]
+ mi := &file_vtctldata_proto_msgTypes[153]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8593,7 +8798,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead.
func (*StartReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{150}
+ return file_vtctldata_proto_rawDescGZIP(), []int{153}
}
func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -8612,7 +8817,7 @@ type StartReplicationResponse struct {
func (x *StartReplicationResponse) Reset() {
*x = StartReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[151]
+ mi := &file_vtctldata_proto_msgTypes[154]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8625,7 +8830,7 @@ func (x *StartReplicationResponse) String() string {
func (*StartReplicationResponse) ProtoMessage() {}
func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[151]
+ mi := &file_vtctldata_proto_msgTypes[154]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8638,7 +8843,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead.
func (*StartReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{151}
+ return file_vtctldata_proto_rawDescGZIP(), []int{154}
}
type StopReplicationRequest struct {
@@ -8652,7 +8857,7 @@ type StopReplicationRequest struct {
func (x *StopReplicationRequest) Reset() {
*x = StopReplicationRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[152]
+ mi := &file_vtctldata_proto_msgTypes[155]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8665,7 +8870,7 @@ func (x *StopReplicationRequest) String() string {
func (*StopReplicationRequest) ProtoMessage() {}
func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[152]
+ mi := &file_vtctldata_proto_msgTypes[155]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8678,7 +8883,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead.
func (*StopReplicationRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{152}
+ return file_vtctldata_proto_rawDescGZIP(), []int{155}
}
func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias {
@@ -8697,7 +8902,7 @@ type StopReplicationResponse struct {
func (x *StopReplicationResponse) Reset() {
*x = StopReplicationResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[153]
+ mi := &file_vtctldata_proto_msgTypes[156]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8710,7 +8915,7 @@ func (x *StopReplicationResponse) String() string {
func (*StopReplicationResponse) ProtoMessage() {}
func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[153]
+ mi := &file_vtctldata_proto_msgTypes[156]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8723,7 +8928,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead.
func (*StopReplicationResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{153}
+ return file_vtctldata_proto_rawDescGZIP(), []int{156}
}
type TabletExternallyReparentedRequest struct {
@@ -8739,7 +8944,7 @@ type TabletExternallyReparentedRequest struct {
func (x *TabletExternallyReparentedRequest) Reset() {
*x = TabletExternallyReparentedRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[154]
+ mi := &file_vtctldata_proto_msgTypes[157]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8752,7 +8957,7 @@ func (x *TabletExternallyReparentedRequest) String() string {
func (*TabletExternallyReparentedRequest) ProtoMessage() {}
func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[154]
+ mi := &file_vtctldata_proto_msgTypes[157]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8765,7 +8970,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message
// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead.
func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{154}
+ return file_vtctldata_proto_rawDescGZIP(), []int{157}
}
func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias {
@@ -8789,7 +8994,7 @@ type TabletExternallyReparentedResponse struct {
func (x *TabletExternallyReparentedResponse) Reset() {
*x = TabletExternallyReparentedResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[155]
+ mi := &file_vtctldata_proto_msgTypes[158]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8802,7 +9007,7 @@ func (x *TabletExternallyReparentedResponse) String() string {
func (*TabletExternallyReparentedResponse) ProtoMessage() {}
func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[155]
+ mi := &file_vtctldata_proto_msgTypes[158]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8815,7 +9020,7 @@ func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message
// Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead.
func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{155}
+ return file_vtctldata_proto_rawDescGZIP(), []int{158}
}
func (x *TabletExternallyReparentedResponse) GetKeyspace() string {
@@ -8858,7 +9063,7 @@ type UpdateCellInfoRequest struct {
func (x *UpdateCellInfoRequest) Reset() {
*x = UpdateCellInfoRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[156]
+ mi := &file_vtctldata_proto_msgTypes[159]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8871,7 +9076,7 @@ func (x *UpdateCellInfoRequest) String() string {
func (*UpdateCellInfoRequest) ProtoMessage() {}
func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[156]
+ mi := &file_vtctldata_proto_msgTypes[159]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8884,7 +9089,7 @@ func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead.
func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{156}
+ return file_vtctldata_proto_rawDescGZIP(), []int{159}
}
func (x *UpdateCellInfoRequest) GetName() string {
@@ -8913,7 +9118,7 @@ type UpdateCellInfoResponse struct {
func (x *UpdateCellInfoResponse) Reset() {
*x = UpdateCellInfoResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[157]
+ mi := &file_vtctldata_proto_msgTypes[160]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8926,7 +9131,7 @@ func (x *UpdateCellInfoResponse) String() string {
func (*UpdateCellInfoResponse) ProtoMessage() {}
func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[157]
+ mi := &file_vtctldata_proto_msgTypes[160]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8939,7 +9144,7 @@ func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead.
func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{157}
+ return file_vtctldata_proto_rawDescGZIP(), []int{160}
}
func (x *UpdateCellInfoResponse) GetName() string {
@@ -8968,7 +9173,7 @@ type UpdateCellsAliasRequest struct {
func (x *UpdateCellsAliasRequest) Reset() {
*x = UpdateCellsAliasRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[158]
+ mi := &file_vtctldata_proto_msgTypes[161]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -8981,7 +9186,7 @@ func (x *UpdateCellsAliasRequest) String() string {
func (*UpdateCellsAliasRequest) ProtoMessage() {}
func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[158]
+ mi := &file_vtctldata_proto_msgTypes[161]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -8994,7 +9199,7 @@ func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead.
func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{158}
+ return file_vtctldata_proto_rawDescGZIP(), []int{161}
}
func (x *UpdateCellsAliasRequest) GetName() string {
@@ -9023,7 +9228,7 @@ type UpdateCellsAliasResponse struct {
func (x *UpdateCellsAliasResponse) Reset() {
*x = UpdateCellsAliasResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[159]
+ mi := &file_vtctldata_proto_msgTypes[162]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9036,7 +9241,7 @@ func (x *UpdateCellsAliasResponse) String() string {
func (*UpdateCellsAliasResponse) ProtoMessage() {}
func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[159]
+ mi := &file_vtctldata_proto_msgTypes[162]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9049,7 +9254,7 @@ func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead.
func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{159}
+ return file_vtctldata_proto_rawDescGZIP(), []int{162}
}
func (x *UpdateCellsAliasResponse) GetName() string {
@@ -9077,7 +9282,7 @@ type ValidateRequest struct {
func (x *ValidateRequest) Reset() {
*x = ValidateRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[160]
+ mi := &file_vtctldata_proto_msgTypes[163]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9090,7 +9295,7 @@ func (x *ValidateRequest) String() string {
func (*ValidateRequest) ProtoMessage() {}
func (x *ValidateRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[160]
+ mi := &file_vtctldata_proto_msgTypes[163]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9103,7 +9308,7 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead.
func (*ValidateRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{160}
+ return file_vtctldata_proto_rawDescGZIP(), []int{163}
}
func (x *ValidateRequest) GetPingTablets() bool {
@@ -9125,7 +9330,7 @@ type ValidateResponse struct {
func (x *ValidateResponse) Reset() {
*x = ValidateResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[161]
+ mi := &file_vtctldata_proto_msgTypes[164]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9138,7 +9343,7 @@ func (x *ValidateResponse) String() string {
func (*ValidateResponse) ProtoMessage() {}
func (x *ValidateResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[161]
+ mi := &file_vtctldata_proto_msgTypes[164]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9151,7 +9356,7 @@ func (x *ValidateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead.
func (*ValidateResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{161}
+ return file_vtctldata_proto_rawDescGZIP(), []int{164}
}
func (x *ValidateResponse) GetResults() []string {
@@ -9180,7 +9385,7 @@ type ValidateKeyspaceRequest struct {
func (x *ValidateKeyspaceRequest) Reset() {
*x = ValidateKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[162]
+ mi := &file_vtctldata_proto_msgTypes[165]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9193,7 +9398,7 @@ func (x *ValidateKeyspaceRequest) String() string {
func (*ValidateKeyspaceRequest) ProtoMessage() {}
func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[162]
+ mi := &file_vtctldata_proto_msgTypes[165]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9206,7 +9411,7 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{162}
+ return file_vtctldata_proto_rawDescGZIP(), []int{165}
}
func (x *ValidateKeyspaceRequest) GetKeyspace() string {
@@ -9235,7 +9440,7 @@ type ValidateKeyspaceResponse struct {
func (x *ValidateKeyspaceResponse) Reset() {
*x = ValidateKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[163]
+ mi := &file_vtctldata_proto_msgTypes[166]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9248,7 +9453,7 @@ func (x *ValidateKeyspaceResponse) String() string {
func (*ValidateKeyspaceResponse) ProtoMessage() {}
func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[163]
+ mi := &file_vtctldata_proto_msgTypes[166]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9261,7 +9466,7 @@ func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{163}
+ return file_vtctldata_proto_rawDescGZIP(), []int{166}
}
func (x *ValidateKeyspaceResponse) GetResults() []string {
@@ -9293,7 +9498,7 @@ type ValidateSchemaKeyspaceRequest struct {
func (x *ValidateSchemaKeyspaceRequest) Reset() {
*x = ValidateSchemaKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[164]
+ mi := &file_vtctldata_proto_msgTypes[167]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9306,7 +9511,7 @@ func (x *ValidateSchemaKeyspaceRequest) String() string {
func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {}
func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[164]
+ mi := &file_vtctldata_proto_msgTypes[167]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9319,7 +9524,7 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{164}
+ return file_vtctldata_proto_rawDescGZIP(), []int{167}
}
func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string {
@@ -9369,7 +9574,7 @@ type ValidateSchemaKeyspaceResponse struct {
func (x *ValidateSchemaKeyspaceResponse) Reset() {
*x = ValidateSchemaKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[165]
+ mi := &file_vtctldata_proto_msgTypes[168]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9382,7 +9587,7 @@ func (x *ValidateSchemaKeyspaceResponse) String() string {
func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {}
func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[165]
+ mi := &file_vtctldata_proto_msgTypes[168]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9395,7 +9600,7 @@ func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{165}
+ return file_vtctldata_proto_rawDescGZIP(), []int{168}
}
func (x *ValidateSchemaKeyspaceResponse) GetResults() []string {
@@ -9425,7 +9630,7 @@ type ValidateShardRequest struct {
func (x *ValidateShardRequest) Reset() {
*x = ValidateShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[166]
+ mi := &file_vtctldata_proto_msgTypes[169]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9438,7 +9643,7 @@ func (x *ValidateShardRequest) String() string {
func (*ValidateShardRequest) ProtoMessage() {}
func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[166]
+ mi := &file_vtctldata_proto_msgTypes[169]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9451,7 +9656,7 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead.
func (*ValidateShardRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{166}
+ return file_vtctldata_proto_rawDescGZIP(), []int{169}
}
func (x *ValidateShardRequest) GetKeyspace() string {
@@ -9486,7 +9691,7 @@ type ValidateShardResponse struct {
func (x *ValidateShardResponse) Reset() {
*x = ValidateShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[167]
+ mi := &file_vtctldata_proto_msgTypes[170]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9499,7 +9704,7 @@ func (x *ValidateShardResponse) String() string {
func (*ValidateShardResponse) ProtoMessage() {}
func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[167]
+ mi := &file_vtctldata_proto_msgTypes[170]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9512,7 +9717,7 @@ func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead.
func (*ValidateShardResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{167}
+ return file_vtctldata_proto_rawDescGZIP(), []int{170}
}
func (x *ValidateShardResponse) GetResults() []string {
@@ -9533,7 +9738,7 @@ type ValidateVersionKeyspaceRequest struct {
func (x *ValidateVersionKeyspaceRequest) Reset() {
*x = ValidateVersionKeyspaceRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[168]
+ mi := &file_vtctldata_proto_msgTypes[171]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9546,7 +9751,7 @@ func (x *ValidateVersionKeyspaceRequest) String() string {
func (*ValidateVersionKeyspaceRequest) ProtoMessage() {}
func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[168]
+ mi := &file_vtctldata_proto_msgTypes[171]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9559,7 +9764,7 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead.
func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{168}
+ return file_vtctldata_proto_rawDescGZIP(), []int{171}
}
func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string {
@@ -9581,7 +9786,7 @@ type ValidateVersionKeyspaceResponse struct {
func (x *ValidateVersionKeyspaceResponse) Reset() {
*x = ValidateVersionKeyspaceResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[169]
+ mi := &file_vtctldata_proto_msgTypes[172]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9594,7 +9799,7 @@ func (x *ValidateVersionKeyspaceResponse) String() string {
func (*ValidateVersionKeyspaceResponse) ProtoMessage() {}
func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[169]
+ mi := &file_vtctldata_proto_msgTypes[172]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9607,7 +9812,7 @@ func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead.
func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{169}
+ return file_vtctldata_proto_rawDescGZIP(), []int{172}
}
func (x *ValidateVersionKeyspaceResponse) GetResults() []string {
@@ -9624,6 +9829,108 @@ func (x *ValidateVersionKeyspaceResponse) GetResultsByShard() map[string]*Valida
return nil
}
+type ValidateVersionShardRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"`
+}
+
+func (x *ValidateVersionShardRequest) Reset() {
+ *x = ValidateVersionShardRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[173]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateVersionShardRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateVersionShardRequest) ProtoMessage() {}
+
+func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[173]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead.
+func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{173}
+}
+
+func (x *ValidateVersionShardRequest) GetKeyspace() string {
+ if x != nil {
+ return x.Keyspace
+ }
+ return ""
+}
+
+func (x *ValidateVersionShardRequest) GetShard() string {
+ if x != nil {
+ return x.Shard
+ }
+ return ""
+}
+
+type ValidateVersionShardResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *ValidateVersionShardResponse) Reset() {
+ *x = ValidateVersionShardResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_vtctldata_proto_msgTypes[174]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ValidateVersionShardResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ValidateVersionShardResponse) ProtoMessage() {}
+
+func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_vtctldata_proto_msgTypes[174]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead.
+func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) {
+ return file_vtctldata_proto_rawDescGZIP(), []int{174}
+}
+
+func (x *ValidateVersionShardResponse) GetResults() []string {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
type ValidateVSchemaRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -9638,7 +9945,7 @@ type ValidateVSchemaRequest struct {
func (x *ValidateVSchemaRequest) Reset() {
*x = ValidateVSchemaRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[170]
+ mi := &file_vtctldata_proto_msgTypes[175]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9651,7 +9958,7 @@ func (x *ValidateVSchemaRequest) String() string {
func (*ValidateVSchemaRequest) ProtoMessage() {}
func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[170]
+ mi := &file_vtctldata_proto_msgTypes[175]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9664,7 +9971,7 @@ func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead.
func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{170}
+ return file_vtctldata_proto_rawDescGZIP(), []int{175}
}
func (x *ValidateVSchemaRequest) GetKeyspace() string {
@@ -9707,7 +10014,7 @@ type ValidateVSchemaResponse struct {
func (x *ValidateVSchemaResponse) Reset() {
*x = ValidateVSchemaResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[171]
+ mi := &file_vtctldata_proto_msgTypes[176]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9720,7 +10027,7 @@ func (x *ValidateVSchemaResponse) String() string {
func (*ValidateVSchemaResponse) ProtoMessage() {}
func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[171]
+ mi := &file_vtctldata_proto_msgTypes[176]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9733,7 +10040,7 @@ func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead.
func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) {
- return file_vtctldata_proto_rawDescGZIP(), []int{171}
+ return file_vtctldata_proto_rawDescGZIP(), []int{176}
}
func (x *ValidateVSchemaResponse) GetResults() []string {
@@ -9762,7 +10069,7 @@ type Workflow_ReplicationLocation struct {
func (x *Workflow_ReplicationLocation) Reset() {
*x = Workflow_ReplicationLocation{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[173]
+ mi := &file_vtctldata_proto_msgTypes[178]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9775,7 +10082,7 @@ func (x *Workflow_ReplicationLocation) String() string {
func (*Workflow_ReplicationLocation) ProtoMessage() {}
func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[173]
+ mi := &file_vtctldata_proto_msgTypes[178]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9818,7 +10125,7 @@ type Workflow_ShardStream struct {
func (x *Workflow_ShardStream) Reset() {
*x = Workflow_ShardStream{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[174]
+ mi := &file_vtctldata_proto_msgTypes[179]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9831,7 +10138,7 @@ func (x *Workflow_ShardStream) String() string {
func (*Workflow_ShardStream) ProtoMessage() {}
func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[174]
+ mi := &file_vtctldata_proto_msgTypes[179]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -9901,7 +10208,7 @@ type Workflow_Stream struct {
func (x *Workflow_Stream) Reset() {
*x = Workflow_Stream{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[175]
+ mi := &file_vtctldata_proto_msgTypes[180]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -9914,7 +10221,7 @@ func (x *Workflow_Stream) String() string {
func (*Workflow_Stream) ProtoMessage() {}
func (x *Workflow_Stream) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[175]
+ mi := &file_vtctldata_proto_msgTypes[180]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10047,7 +10354,7 @@ type Workflow_Stream_CopyState struct {
func (x *Workflow_Stream_CopyState) Reset() {
*x = Workflow_Stream_CopyState{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[176]
+ mi := &file_vtctldata_proto_msgTypes[181]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -10060,7 +10367,7 @@ func (x *Workflow_Stream_CopyState) String() string {
func (*Workflow_Stream_CopyState) ProtoMessage() {}
func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[176]
+ mi := &file_vtctldata_proto_msgTypes[181]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10108,7 +10415,7 @@ type Workflow_Stream_Log struct {
func (x *Workflow_Stream_Log) Reset() {
*x = Workflow_Stream_Log{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[177]
+ mi := &file_vtctldata_proto_msgTypes[182]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -10121,7 +10428,7 @@ func (x *Workflow_Stream_Log) String() string {
func (*Workflow_Stream_Log) ProtoMessage() {}
func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[177]
+ mi := &file_vtctldata_proto_msgTypes[182]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10204,7 +10511,7 @@ type GetSrvKeyspaceNamesResponse_NameList struct {
func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() {
*x = GetSrvKeyspaceNamesResponse_NameList{}
if protoimpl.UnsafeEnabled {
- mi := &file_vtctldata_proto_msgTypes[181]
+ mi := &file_vtctldata_proto_msgTypes[186]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -10217,7 +10524,7 @@ func (x *GetSrvKeyspaceNamesResponse_NameList) String() string {
func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {}
func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message {
- mi := &file_vtctldata_proto_msgTypes[181]
+ mi := &file_vtctldata_proto_msgTypes[186]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -10498,7 +10805,7 @@ var file_vtctldata_proto_rawDesc = []byte{
0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c,
0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x90, 0x01, 0x0a,
+ 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xcc, 0x01, 0x0a,
0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
@@ -10507,1047 +10814,1085 @@ var file_vtctldata_proto_rawDesc = []byte{
0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a,
0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22,
- 0xa2, 0x01, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24,
- 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
- 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a,
- 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63,
- 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x63, 0x79, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70,
- 0x65, 0x52, 0x06, 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79,
- 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52,
- 0x75, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x35, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b,
- 0x61, 0x66, 0x74, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77,
- 0x61, 0x73, 0x5f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x09, 0x77, 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xf1, 0x02, 0x0a, 0x15,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12,
- 0x2f, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76,
- 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61,
- 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73,
- 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f,
- 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23,
- 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74,
- 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
- 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69,
- 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22,
- 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a,
- 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
- 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73,
- 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41,
- 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22,
- 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66,
- 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63,
- 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65,
- 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a,
- 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x28, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63,
- 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65,
- 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f,
- 0x69, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12,
- 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05,
- 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a,
- 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, 0x0a, 0x18,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23,
- 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a,
- 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65,
- 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f,
- 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
- 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74,
- 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
- 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65,
- 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f,
- 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f,
- 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f,
- 0x73, 0x73, 0x43, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0xbc, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64,
- 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
- 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c,
- 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa0,
- 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41,
- 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d,
- 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d,
- 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f,
- 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x6f,
- 0x6c, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63,
- 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a,
- 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x18, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72,
- 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f,
- 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69,
- 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73,
- 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72,
- 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68,
- 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a,
- 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x78,
- 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63,
- 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x22, 0x5e, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, 0x6f, 0x6f, 0x6b,
- 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
- 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
- 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65,
- 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c,
- 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65,
- 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c,
- 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c,
- 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
- 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74,
- 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49,
- 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c,
- 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c,
- 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74,
- 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47,
- 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47,
- 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a,
- 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
+ 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12,
+ 0x28, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x62, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x0e,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38,
0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69,
- 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55,
- 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75,
- 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69,
- 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
- 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e,
- 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76,
- 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e,
- 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c,
- 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a,
- 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65,
- 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75,
+ 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20,
+ 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79,
+ 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x64,
+ 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6,
+ 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62,
+ 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65,
+ 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64,
+ 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x61,
+ 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xf1, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f,
+ 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a,
+ 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61,
+ 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
+ 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64,
+ 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a,
+ 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x49, 0x0a, 0x16, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61,
0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75,
- 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22,
- 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a,
- 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
- 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47,
- 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d,
- 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c,
- 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74,
- 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c,
- 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11,
- 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72,
- 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
- 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c,
- 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76,
- 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c,
- 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22,
- 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72,
- 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65,
- 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
+ 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25,
+ 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a,
+ 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
+ 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61,
+ 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43,
+ 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65,
+ 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69,
+ 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
+ 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x06, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69,
+ 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
+ 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f, 0x69, 0x66, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x76,
+ 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x66,
+ 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63,
+ 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22,
+ 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x92, 0x03, 0x0a, 0x1d, 0x45, 0x6d, 0x65,
+ 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c,
- 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54,
- 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22,
- 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b,
+ 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74,
- 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x63, 0x65, 0x6c, 0x6c,
+ 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x65,
+ 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x65,
+ 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x65, 0x78,
+ 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xbc, 0x01,
+ 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c,
- 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15,
- 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74,
- 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77,
- 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f,
- 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26,
- 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e,
- 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x1b,
- 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a,
- 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
+ 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73,
- 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e,
- 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72,
- 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03,
+ 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa0, 0x01, 0x0a,
+ 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41,
+ 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78,
+ 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78,
+ 0x52, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x22,
+ 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41,
+ 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65,
+ 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f,
+ 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
+ 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12,
+ 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77,
+ 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73,
+ 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c,
+ 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62,
+ 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47,
+ 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73,
+ 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52,
+ 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63,
+ 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
+ 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f,
- 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c,
- 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76,
- 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c,
- 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65,
- 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61,
- 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65,
- 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70,
- 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d,
- 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a,
- 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16,
- 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73,
- 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61,
+ 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22,
+ 0x5e, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22,
+ 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
+ 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01,
+ 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49,
+ 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69,
+ 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e,
+ 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18,
- 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, 0x0a,
- 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12,
- 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65,
- 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65,
- 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, 0x69,
- 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22,
+ 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74,
+ 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c,
+ 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63,
+ 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22,
+ 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69,
+ 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63,
+ 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65,
+ 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66,
+ 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6,
+ 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c,
+ 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c,
+ 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, 0x75,
+ 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74,
+ 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49,
+ 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47,
+ 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72,
+ 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61,
+ 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 0x0a, 0x17,
+ 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67,
+ 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75,
+ 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65,
+ 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79,
+ 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x5f,
+ 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75,
+ 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x32, 0x0a,
+ 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63,
+ 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c,
+ 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73,
+ 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72,
+ 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72,
+ 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11, 0x53, 0x72,
+ 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65,
+ 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e,
+ 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76,
+ 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e,
+ 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d,
+ 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01,
+ 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f,
+ 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53,
+ 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73,
+ 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e,
+ 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x1b,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f,
- 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f,
- 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
- 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63,
- 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, 0x61,
- 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69,
- 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22,
- 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23,
- 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b,
- 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43,
- 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f,
- 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72,
- 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
- 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73,
+ 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f,
+ 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70,
+ 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a,
+ 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22,
+ 0x2c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61,
+ 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, 0x0a,
+ 0x17, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x52,
+ 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x66, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67,
+ 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, 0x0a,
+ 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d,
- 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a,
- 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c,
- 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72,
- 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75,
- 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4d,
+ 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c,
+ 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72,
- 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
- 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, 0x0a,
+ 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a,
+ 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b,
+ 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a,
+ 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72,
+ 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
+ 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72,
+ 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52,
+ 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74,
+ 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42,
+ 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67,
+ 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xcb, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e,
+ 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77,
+ 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
+ 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a,
+ 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76,
+ 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+ 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x12, 0x40, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f,
+ 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x72,
+ 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65,
+ 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d,
+ 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f,
+ 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67,
+ 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75,
+ 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72,
+ 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x52,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69,
0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52,
- 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x0b,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52,
- 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xad, 0x01, 0x0a, 0x19,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x51, 0x0a, 0x15, 0x52,
- 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x18,
- 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75,
- 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc8,
- 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, 0x53, 0x65, 0x74,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72,
- 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74,
- 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, 0x65,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e,
- 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63,
- 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, 0x04,
- 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, 0x65,
- 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e,
- 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, 0x0a,
- 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14,
+ 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x52,
+ 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73,
+ 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61,
+ 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x61, 0x72, 0x74,
+ 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x1b, 0x52, 0x65,
+ 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61,
+ 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e,
+ 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
+ 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72,
+ 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbc, 0x01,
+ 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d,
+ 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f,
+ 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
+ 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19,
+ 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75,
+ 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16,
+ 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63,
+ 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63,
+ 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65,
+ 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63,
+ 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12,
+ 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69,
+ 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73,
+ 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46,
+ 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e,
- 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50,
- 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, 0x0a,
- 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12,
- 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
+ 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x22, 0xb9, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46,
+ 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69,
+ 0x6e, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x65, 0x64, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x22,
+ 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a,
+ 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65,
+ 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74,
+ 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22,
+ 0x51, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
+ 0x61, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22,
+ 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b,
+ 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53,
+ 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69,
+ 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c,
0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d,
- 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x46, 0x0a,
- 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
- 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, 0x20,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06,
+ 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a,
+ 0x1d, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e,
+ 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e,
+ 0x0a, 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61,
- 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f,
- 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a,
- 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72,
+ 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51,
+ 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e,
+ 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c,
- 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x20, 0x0a,
- 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f,
- 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f,
- 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12,
- 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a,
- 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75,
- 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f,
- 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71,
+ 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65,
+ 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73,
+ 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d,
+ 0x6f, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74,
+ 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a,
+ 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a,
+ 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, 0x17,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a,
- 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
+ 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x22, 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14,
+ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d,
+ 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d,
+ 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41,
0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x21,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79,
- 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77,
- 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
- 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f,
- 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69,
- 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63,
- 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e,
- 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65,
- 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61,
- 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70,
- 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, 0x18,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b,
- 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c,
- 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c,
- 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69,
- 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, 0x16,
- 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c,
+ 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21,
- 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x22, 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a,
+ 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72,
+ 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70,
+ 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61,
+ 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a,
+ 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65,
+ 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
+ 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66,
+ 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c,
+ 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74,
+ 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
+ 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c,
+ 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73,
+ 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c,
+ 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69,
+ 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01,
+ 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56,
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42,
- 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73,
- 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
- 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a,
+ 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63,
+ 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26,
+ 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50,
+ 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22,
+ 0x88, 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x25,
- 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65,
- 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b,
- 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x73,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, 0x1e,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65,
- 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
- 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68,
- 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07,
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
- 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65,
- 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12,
+ 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69,
+ 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73,
- 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64,
- 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75,
- 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a,
- 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75,
- 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42,
- 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74,
- 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e,
- 0x0a, 0x0a, 0x4d, 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15,
- 0x0a, 0x11, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e,
- 0x44, 0x45, 0x58, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
- 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65,
+ 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x4d, 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a,
+ 0x11, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44,
+ 0x45, 0x58, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69,
+ 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -11563,7 +11908,7 @@ func file_vtctldata_proto_rawDescGZIP() []byte {
}
var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 191)
+var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 196)
var file_vtctldata_proto_goTypes = []interface{}{
(MaterializationIntent)(0), // 0: vtctldata.MaterializationIntent
(*ExecuteVtctlCommandRequest)(nil), // 1: vtctldata.ExecuteVtctlCommandRequest
@@ -11652,292 +11997,300 @@ var file_vtctldata_proto_goTypes = []interface{}{
(*GetTabletResponse)(nil), // 84: vtctldata.GetTabletResponse
(*GetTabletsRequest)(nil), // 85: vtctldata.GetTabletsRequest
(*GetTabletsResponse)(nil), // 86: vtctldata.GetTabletsResponse
- (*GetVSchemaRequest)(nil), // 87: vtctldata.GetVSchemaRequest
- (*GetVersionRequest)(nil), // 88: vtctldata.GetVersionRequest
- (*GetVersionResponse)(nil), // 89: vtctldata.GetVersionResponse
- (*GetVSchemaResponse)(nil), // 90: vtctldata.GetVSchemaResponse
- (*GetWorkflowsRequest)(nil), // 91: vtctldata.GetWorkflowsRequest
- (*GetWorkflowsResponse)(nil), // 92: vtctldata.GetWorkflowsResponse
- (*InitShardPrimaryRequest)(nil), // 93: vtctldata.InitShardPrimaryRequest
- (*InitShardPrimaryResponse)(nil), // 94: vtctldata.InitShardPrimaryResponse
- (*PingTabletRequest)(nil), // 95: vtctldata.PingTabletRequest
- (*PingTabletResponse)(nil), // 96: vtctldata.PingTabletResponse
- (*PlannedReparentShardRequest)(nil), // 97: vtctldata.PlannedReparentShardRequest
- (*PlannedReparentShardResponse)(nil), // 98: vtctldata.PlannedReparentShardResponse
- (*RebuildKeyspaceGraphRequest)(nil), // 99: vtctldata.RebuildKeyspaceGraphRequest
- (*RebuildKeyspaceGraphResponse)(nil), // 100: vtctldata.RebuildKeyspaceGraphResponse
- (*RebuildVSchemaGraphRequest)(nil), // 101: vtctldata.RebuildVSchemaGraphRequest
- (*RebuildVSchemaGraphResponse)(nil), // 102: vtctldata.RebuildVSchemaGraphResponse
- (*RefreshStateRequest)(nil), // 103: vtctldata.RefreshStateRequest
- (*RefreshStateResponse)(nil), // 104: vtctldata.RefreshStateResponse
- (*RefreshStateByShardRequest)(nil), // 105: vtctldata.RefreshStateByShardRequest
- (*RefreshStateByShardResponse)(nil), // 106: vtctldata.RefreshStateByShardResponse
- (*ReloadSchemaRequest)(nil), // 107: vtctldata.ReloadSchemaRequest
- (*ReloadSchemaResponse)(nil), // 108: vtctldata.ReloadSchemaResponse
- (*ReloadSchemaKeyspaceRequest)(nil), // 109: vtctldata.ReloadSchemaKeyspaceRequest
- (*ReloadSchemaKeyspaceResponse)(nil), // 110: vtctldata.ReloadSchemaKeyspaceResponse
- (*ReloadSchemaShardRequest)(nil), // 111: vtctldata.ReloadSchemaShardRequest
- (*ReloadSchemaShardResponse)(nil), // 112: vtctldata.ReloadSchemaShardResponse
- (*RemoveBackupRequest)(nil), // 113: vtctldata.RemoveBackupRequest
- (*RemoveBackupResponse)(nil), // 114: vtctldata.RemoveBackupResponse
- (*RemoveKeyspaceCellRequest)(nil), // 115: vtctldata.RemoveKeyspaceCellRequest
- (*RemoveKeyspaceCellResponse)(nil), // 116: vtctldata.RemoveKeyspaceCellResponse
- (*RemoveShardCellRequest)(nil), // 117: vtctldata.RemoveShardCellRequest
- (*RemoveShardCellResponse)(nil), // 118: vtctldata.RemoveShardCellResponse
- (*ReparentTabletRequest)(nil), // 119: vtctldata.ReparentTabletRequest
- (*ReparentTabletResponse)(nil), // 120: vtctldata.ReparentTabletResponse
- (*RestoreFromBackupRequest)(nil), // 121: vtctldata.RestoreFromBackupRequest
- (*RestoreFromBackupResponse)(nil), // 122: vtctldata.RestoreFromBackupResponse
- (*RunHealthCheckRequest)(nil), // 123: vtctldata.RunHealthCheckRequest
- (*RunHealthCheckResponse)(nil), // 124: vtctldata.RunHealthCheckResponse
- (*SetKeyspaceDurabilityPolicyRequest)(nil), // 125: vtctldata.SetKeyspaceDurabilityPolicyRequest
- (*SetKeyspaceDurabilityPolicyResponse)(nil), // 126: vtctldata.SetKeyspaceDurabilityPolicyResponse
- (*SetKeyspaceServedFromRequest)(nil), // 127: vtctldata.SetKeyspaceServedFromRequest
- (*SetKeyspaceServedFromResponse)(nil), // 128: vtctldata.SetKeyspaceServedFromResponse
- (*SetKeyspaceShardingInfoRequest)(nil), // 129: vtctldata.SetKeyspaceShardingInfoRequest
- (*SetKeyspaceShardingInfoResponse)(nil), // 130: vtctldata.SetKeyspaceShardingInfoResponse
- (*SetShardIsPrimaryServingRequest)(nil), // 131: vtctldata.SetShardIsPrimaryServingRequest
- (*SetShardIsPrimaryServingResponse)(nil), // 132: vtctldata.SetShardIsPrimaryServingResponse
- (*SetShardTabletControlRequest)(nil), // 133: vtctldata.SetShardTabletControlRequest
- (*SetShardTabletControlResponse)(nil), // 134: vtctldata.SetShardTabletControlResponse
- (*SetWritableRequest)(nil), // 135: vtctldata.SetWritableRequest
- (*SetWritableResponse)(nil), // 136: vtctldata.SetWritableResponse
- (*ShardReplicationAddRequest)(nil), // 137: vtctldata.ShardReplicationAddRequest
- (*ShardReplicationAddResponse)(nil), // 138: vtctldata.ShardReplicationAddResponse
- (*ShardReplicationFixRequest)(nil), // 139: vtctldata.ShardReplicationFixRequest
- (*ShardReplicationFixResponse)(nil), // 140: vtctldata.ShardReplicationFixResponse
- (*ShardReplicationPositionsRequest)(nil), // 141: vtctldata.ShardReplicationPositionsRequest
- (*ShardReplicationPositionsResponse)(nil), // 142: vtctldata.ShardReplicationPositionsResponse
- (*ShardReplicationRemoveRequest)(nil), // 143: vtctldata.ShardReplicationRemoveRequest
- (*ShardReplicationRemoveResponse)(nil), // 144: vtctldata.ShardReplicationRemoveResponse
- (*SleepTabletRequest)(nil), // 145: vtctldata.SleepTabletRequest
- (*SleepTabletResponse)(nil), // 146: vtctldata.SleepTabletResponse
- (*SourceShardAddRequest)(nil), // 147: vtctldata.SourceShardAddRequest
- (*SourceShardAddResponse)(nil), // 148: vtctldata.SourceShardAddResponse
- (*SourceShardDeleteRequest)(nil), // 149: vtctldata.SourceShardDeleteRequest
- (*SourceShardDeleteResponse)(nil), // 150: vtctldata.SourceShardDeleteResponse
- (*StartReplicationRequest)(nil), // 151: vtctldata.StartReplicationRequest
- (*StartReplicationResponse)(nil), // 152: vtctldata.StartReplicationResponse
- (*StopReplicationRequest)(nil), // 153: vtctldata.StopReplicationRequest
- (*StopReplicationResponse)(nil), // 154: vtctldata.StopReplicationResponse
- (*TabletExternallyReparentedRequest)(nil), // 155: vtctldata.TabletExternallyReparentedRequest
- (*TabletExternallyReparentedResponse)(nil), // 156: vtctldata.TabletExternallyReparentedResponse
- (*UpdateCellInfoRequest)(nil), // 157: vtctldata.UpdateCellInfoRequest
- (*UpdateCellInfoResponse)(nil), // 158: vtctldata.UpdateCellInfoResponse
- (*UpdateCellsAliasRequest)(nil), // 159: vtctldata.UpdateCellsAliasRequest
- (*UpdateCellsAliasResponse)(nil), // 160: vtctldata.UpdateCellsAliasResponse
- (*ValidateRequest)(nil), // 161: vtctldata.ValidateRequest
- (*ValidateResponse)(nil), // 162: vtctldata.ValidateResponse
- (*ValidateKeyspaceRequest)(nil), // 163: vtctldata.ValidateKeyspaceRequest
- (*ValidateKeyspaceResponse)(nil), // 164: vtctldata.ValidateKeyspaceResponse
- (*ValidateSchemaKeyspaceRequest)(nil), // 165: vtctldata.ValidateSchemaKeyspaceRequest
- (*ValidateSchemaKeyspaceResponse)(nil), // 166: vtctldata.ValidateSchemaKeyspaceResponse
- (*ValidateShardRequest)(nil), // 167: vtctldata.ValidateShardRequest
- (*ValidateShardResponse)(nil), // 168: vtctldata.ValidateShardResponse
- (*ValidateVersionKeyspaceRequest)(nil), // 169: vtctldata.ValidateVersionKeyspaceRequest
- (*ValidateVersionKeyspaceResponse)(nil), // 170: vtctldata.ValidateVersionKeyspaceResponse
- (*ValidateVSchemaRequest)(nil), // 171: vtctldata.ValidateVSchemaRequest
- (*ValidateVSchemaResponse)(nil), // 172: vtctldata.ValidateVSchemaResponse
- nil, // 173: vtctldata.Workflow.ShardStreamsEntry
- (*Workflow_ReplicationLocation)(nil), // 174: vtctldata.Workflow.ReplicationLocation
- (*Workflow_ShardStream)(nil), // 175: vtctldata.Workflow.ShardStream
- (*Workflow_Stream)(nil), // 176: vtctldata.Workflow.Stream
- (*Workflow_Stream_CopyState)(nil), // 177: vtctldata.Workflow.Stream.CopyState
- (*Workflow_Stream_Log)(nil), // 178: vtctldata.Workflow.Stream.Log
- nil, // 179: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
- nil, // 180: vtctldata.GetCellsAliasesResponse.AliasesEntry
- nil, // 181: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
- (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 182: vtctldata.GetSrvKeyspaceNamesResponse.NameList
- nil, // 183: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
- nil, // 184: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
- nil, // 185: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
- nil, // 186: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
- nil, // 187: vtctldata.ValidateResponse.ResultsByKeyspaceEntry
- nil, // 188: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
- nil, // 189: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
- nil, // 190: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
- nil, // 191: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
- (*logutil.Event)(nil), // 192: logutil.Event
- (*topodata.Keyspace)(nil), // 193: topodata.Keyspace
- (*topodata.Shard)(nil), // 194: topodata.Shard
- (*topodata.CellInfo)(nil), // 195: topodata.CellInfo
- (*vschema.RoutingRules)(nil), // 196: vschema.RoutingRules
- (*vschema.ShardRoutingRules)(nil), // 197: vschema.ShardRoutingRules
- (*vttime.Duration)(nil), // 198: vttime.Duration
- (*vtrpc.CallerID)(nil), // 199: vtrpc.CallerID
- (*vschema.Keyspace)(nil), // 200: vschema.Keyspace
- (*topodata.TabletAlias)(nil), // 201: topodata.TabletAlias
- (topodata.TabletType)(0), // 202: topodata.TabletType
- (*topodata.Tablet)(nil), // 203: topodata.Tablet
- (*topodata.Keyspace_ServedFrom)(nil), // 204: topodata.Keyspace.ServedFrom
- (topodata.KeyspaceType)(0), // 205: topodata.KeyspaceType
- (*vttime.Time)(nil), // 206: vttime.Time
- (*query.QueryResult)(nil), // 207: query.QueryResult
- (*tabletmanagerdata.ExecuteHookRequest)(nil), // 208: tabletmanagerdata.ExecuteHookRequest
- (*tabletmanagerdata.ExecuteHookResponse)(nil), // 209: tabletmanagerdata.ExecuteHookResponse
- (*mysqlctl.BackupInfo)(nil), // 210: mysqlctl.BackupInfo
- (*replicationdata.FullStatus)(nil), // 211: replicationdata.FullStatus
- (*tabletmanagerdata.Permissions)(nil), // 212: tabletmanagerdata.Permissions
- (*tabletmanagerdata.SchemaDefinition)(nil), // 213: tabletmanagerdata.SchemaDefinition
- (*vschema.SrvVSchema)(nil), // 214: vschema.SrvVSchema
- (*topodata.ShardReplicationError)(nil), // 215: topodata.ShardReplicationError
- (*topodata.KeyRange)(nil), // 216: topodata.KeyRange
- (*topodata.CellsAlias)(nil), // 217: topodata.CellsAlias
- (*topodata.Shard_TabletControl)(nil), // 218: topodata.Shard.TabletControl
- (*binlogdata.BinlogSource)(nil), // 219: binlogdata.BinlogSource
- (*topodata.SrvKeyspace)(nil), // 220: topodata.SrvKeyspace
- (*replicationdata.Status)(nil), // 221: replicationdata.Status
+ (*GetTopologyPathRequest)(nil), // 87: vtctldata.GetTopologyPathRequest
+ (*GetTopologyPathResponse)(nil), // 88: vtctldata.GetTopologyPathResponse
+ (*TopologyCell)(nil), // 89: vtctldata.TopologyCell
+ (*GetVSchemaRequest)(nil), // 90: vtctldata.GetVSchemaRequest
+ (*GetVersionRequest)(nil), // 91: vtctldata.GetVersionRequest
+ (*GetVersionResponse)(nil), // 92: vtctldata.GetVersionResponse
+ (*GetVSchemaResponse)(nil), // 93: vtctldata.GetVSchemaResponse
+ (*GetWorkflowsRequest)(nil), // 94: vtctldata.GetWorkflowsRequest
+ (*GetWorkflowsResponse)(nil), // 95: vtctldata.GetWorkflowsResponse
+ (*InitShardPrimaryRequest)(nil), // 96: vtctldata.InitShardPrimaryRequest
+ (*InitShardPrimaryResponse)(nil), // 97: vtctldata.InitShardPrimaryResponse
+ (*PingTabletRequest)(nil), // 98: vtctldata.PingTabletRequest
+ (*PingTabletResponse)(nil), // 99: vtctldata.PingTabletResponse
+ (*PlannedReparentShardRequest)(nil), // 100: vtctldata.PlannedReparentShardRequest
+ (*PlannedReparentShardResponse)(nil), // 101: vtctldata.PlannedReparentShardResponse
+ (*RebuildKeyspaceGraphRequest)(nil), // 102: vtctldata.RebuildKeyspaceGraphRequest
+ (*RebuildKeyspaceGraphResponse)(nil), // 103: vtctldata.RebuildKeyspaceGraphResponse
+ (*RebuildVSchemaGraphRequest)(nil), // 104: vtctldata.RebuildVSchemaGraphRequest
+ (*RebuildVSchemaGraphResponse)(nil), // 105: vtctldata.RebuildVSchemaGraphResponse
+ (*RefreshStateRequest)(nil), // 106: vtctldata.RefreshStateRequest
+ (*RefreshStateResponse)(nil), // 107: vtctldata.RefreshStateResponse
+ (*RefreshStateByShardRequest)(nil), // 108: vtctldata.RefreshStateByShardRequest
+ (*RefreshStateByShardResponse)(nil), // 109: vtctldata.RefreshStateByShardResponse
+ (*ReloadSchemaRequest)(nil), // 110: vtctldata.ReloadSchemaRequest
+ (*ReloadSchemaResponse)(nil), // 111: vtctldata.ReloadSchemaResponse
+ (*ReloadSchemaKeyspaceRequest)(nil), // 112: vtctldata.ReloadSchemaKeyspaceRequest
+ (*ReloadSchemaKeyspaceResponse)(nil), // 113: vtctldata.ReloadSchemaKeyspaceResponse
+ (*ReloadSchemaShardRequest)(nil), // 114: vtctldata.ReloadSchemaShardRequest
+ (*ReloadSchemaShardResponse)(nil), // 115: vtctldata.ReloadSchemaShardResponse
+ (*RemoveBackupRequest)(nil), // 116: vtctldata.RemoveBackupRequest
+ (*RemoveBackupResponse)(nil), // 117: vtctldata.RemoveBackupResponse
+ (*RemoveKeyspaceCellRequest)(nil), // 118: vtctldata.RemoveKeyspaceCellRequest
+ (*RemoveKeyspaceCellResponse)(nil), // 119: vtctldata.RemoveKeyspaceCellResponse
+ (*RemoveShardCellRequest)(nil), // 120: vtctldata.RemoveShardCellRequest
+ (*RemoveShardCellResponse)(nil), // 121: vtctldata.RemoveShardCellResponse
+ (*ReparentTabletRequest)(nil), // 122: vtctldata.ReparentTabletRequest
+ (*ReparentTabletResponse)(nil), // 123: vtctldata.ReparentTabletResponse
+ (*RestoreFromBackupRequest)(nil), // 124: vtctldata.RestoreFromBackupRequest
+ (*RestoreFromBackupResponse)(nil), // 125: vtctldata.RestoreFromBackupResponse
+ (*RunHealthCheckRequest)(nil), // 126: vtctldata.RunHealthCheckRequest
+ (*RunHealthCheckResponse)(nil), // 127: vtctldata.RunHealthCheckResponse
+ (*SetKeyspaceDurabilityPolicyRequest)(nil), // 128: vtctldata.SetKeyspaceDurabilityPolicyRequest
+ (*SetKeyspaceDurabilityPolicyResponse)(nil), // 129: vtctldata.SetKeyspaceDurabilityPolicyResponse
+ (*SetKeyspaceServedFromRequest)(nil), // 130: vtctldata.SetKeyspaceServedFromRequest
+ (*SetKeyspaceServedFromResponse)(nil), // 131: vtctldata.SetKeyspaceServedFromResponse
+ (*SetKeyspaceShardingInfoRequest)(nil), // 132: vtctldata.SetKeyspaceShardingInfoRequest
+ (*SetKeyspaceShardingInfoResponse)(nil), // 133: vtctldata.SetKeyspaceShardingInfoResponse
+ (*SetShardIsPrimaryServingRequest)(nil), // 134: vtctldata.SetShardIsPrimaryServingRequest
+ (*SetShardIsPrimaryServingResponse)(nil), // 135: vtctldata.SetShardIsPrimaryServingResponse
+ (*SetShardTabletControlRequest)(nil), // 136: vtctldata.SetShardTabletControlRequest
+ (*SetShardTabletControlResponse)(nil), // 137: vtctldata.SetShardTabletControlResponse
+ (*SetWritableRequest)(nil), // 138: vtctldata.SetWritableRequest
+ (*SetWritableResponse)(nil), // 139: vtctldata.SetWritableResponse
+ (*ShardReplicationAddRequest)(nil), // 140: vtctldata.ShardReplicationAddRequest
+ (*ShardReplicationAddResponse)(nil), // 141: vtctldata.ShardReplicationAddResponse
+ (*ShardReplicationFixRequest)(nil), // 142: vtctldata.ShardReplicationFixRequest
+ (*ShardReplicationFixResponse)(nil), // 143: vtctldata.ShardReplicationFixResponse
+ (*ShardReplicationPositionsRequest)(nil), // 144: vtctldata.ShardReplicationPositionsRequest
+ (*ShardReplicationPositionsResponse)(nil), // 145: vtctldata.ShardReplicationPositionsResponse
+ (*ShardReplicationRemoveRequest)(nil), // 146: vtctldata.ShardReplicationRemoveRequest
+ (*ShardReplicationRemoveResponse)(nil), // 147: vtctldata.ShardReplicationRemoveResponse
+ (*SleepTabletRequest)(nil), // 148: vtctldata.SleepTabletRequest
+ (*SleepTabletResponse)(nil), // 149: vtctldata.SleepTabletResponse
+ (*SourceShardAddRequest)(nil), // 150: vtctldata.SourceShardAddRequest
+ (*SourceShardAddResponse)(nil), // 151: vtctldata.SourceShardAddResponse
+ (*SourceShardDeleteRequest)(nil), // 152: vtctldata.SourceShardDeleteRequest
+ (*SourceShardDeleteResponse)(nil), // 153: vtctldata.SourceShardDeleteResponse
+ (*StartReplicationRequest)(nil), // 154: vtctldata.StartReplicationRequest
+ (*StartReplicationResponse)(nil), // 155: vtctldata.StartReplicationResponse
+ (*StopReplicationRequest)(nil), // 156: vtctldata.StopReplicationRequest
+ (*StopReplicationResponse)(nil), // 157: vtctldata.StopReplicationResponse
+ (*TabletExternallyReparentedRequest)(nil), // 158: vtctldata.TabletExternallyReparentedRequest
+ (*TabletExternallyReparentedResponse)(nil), // 159: vtctldata.TabletExternallyReparentedResponse
+ (*UpdateCellInfoRequest)(nil), // 160: vtctldata.UpdateCellInfoRequest
+ (*UpdateCellInfoResponse)(nil), // 161: vtctldata.UpdateCellInfoResponse
+ (*UpdateCellsAliasRequest)(nil), // 162: vtctldata.UpdateCellsAliasRequest
+ (*UpdateCellsAliasResponse)(nil), // 163: vtctldata.UpdateCellsAliasResponse
+ (*ValidateRequest)(nil), // 164: vtctldata.ValidateRequest
+ (*ValidateResponse)(nil), // 165: vtctldata.ValidateResponse
+ (*ValidateKeyspaceRequest)(nil), // 166: vtctldata.ValidateKeyspaceRequest
+ (*ValidateKeyspaceResponse)(nil), // 167: vtctldata.ValidateKeyspaceResponse
+ (*ValidateSchemaKeyspaceRequest)(nil), // 168: vtctldata.ValidateSchemaKeyspaceRequest
+ (*ValidateSchemaKeyspaceResponse)(nil), // 169: vtctldata.ValidateSchemaKeyspaceResponse
+ (*ValidateShardRequest)(nil), // 170: vtctldata.ValidateShardRequest
+ (*ValidateShardResponse)(nil), // 171: vtctldata.ValidateShardResponse
+ (*ValidateVersionKeyspaceRequest)(nil), // 172: vtctldata.ValidateVersionKeyspaceRequest
+ (*ValidateVersionKeyspaceResponse)(nil), // 173: vtctldata.ValidateVersionKeyspaceResponse
+ (*ValidateVersionShardRequest)(nil), // 174: vtctldata.ValidateVersionShardRequest
+ (*ValidateVersionShardResponse)(nil), // 175: vtctldata.ValidateVersionShardResponse
+ (*ValidateVSchemaRequest)(nil), // 176: vtctldata.ValidateVSchemaRequest
+ (*ValidateVSchemaResponse)(nil), // 177: vtctldata.ValidateVSchemaResponse
+ nil, // 178: vtctldata.Workflow.ShardStreamsEntry
+ (*Workflow_ReplicationLocation)(nil), // 179: vtctldata.Workflow.ReplicationLocation
+ (*Workflow_ShardStream)(nil), // 180: vtctldata.Workflow.ShardStream
+ (*Workflow_Stream)(nil), // 181: vtctldata.Workflow.Stream
+ (*Workflow_Stream_CopyState)(nil), // 182: vtctldata.Workflow.Stream.CopyState
+ (*Workflow_Stream_Log)(nil), // 183: vtctldata.Workflow.Stream.Log
+ nil, // 184: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
+ nil, // 185: vtctldata.GetCellsAliasesResponse.AliasesEntry
+ nil, // 186: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
+ (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 187: vtctldata.GetSrvKeyspaceNamesResponse.NameList
+ nil, // 188: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
+ nil, // 189: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
+ nil, // 190: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
+ nil, // 191: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
+ nil, // 192: vtctldata.ValidateResponse.ResultsByKeyspaceEntry
+ nil, // 193: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
+ nil, // 194: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
+ nil, // 195: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
+ nil, // 196: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
+ (*logutil.Event)(nil), // 197: logutil.Event
+ (*topodata.Keyspace)(nil), // 198: topodata.Keyspace
+ (*topodata.Shard)(nil), // 199: topodata.Shard
+ (*topodata.CellInfo)(nil), // 200: topodata.CellInfo
+ (*vschema.RoutingRules)(nil), // 201: vschema.RoutingRules
+ (*vschema.ShardRoutingRules)(nil), // 202: vschema.ShardRoutingRules
+ (*vttime.Duration)(nil), // 203: vttime.Duration
+ (*vtrpc.CallerID)(nil), // 204: vtrpc.CallerID
+ (*vschema.Keyspace)(nil), // 205: vschema.Keyspace
+ (*topodata.TabletAlias)(nil), // 206: topodata.TabletAlias
+ (topodata.TabletType)(0), // 207: topodata.TabletType
+ (*topodata.Tablet)(nil), // 208: topodata.Tablet
+ (*topodata.Keyspace_ServedFrom)(nil), // 209: topodata.Keyspace.ServedFrom
+ (topodata.KeyspaceType)(0), // 210: topodata.KeyspaceType
+ (*vttime.Time)(nil), // 211: vttime.Time
+ (*query.QueryResult)(nil), // 212: query.QueryResult
+ (*tabletmanagerdata.ExecuteHookRequest)(nil), // 213: tabletmanagerdata.ExecuteHookRequest
+ (*tabletmanagerdata.ExecuteHookResponse)(nil), // 214: tabletmanagerdata.ExecuteHookResponse
+ (*mysqlctl.BackupInfo)(nil), // 215: mysqlctl.BackupInfo
+ (*replicationdata.FullStatus)(nil), // 216: replicationdata.FullStatus
+ (*tabletmanagerdata.Permissions)(nil), // 217: tabletmanagerdata.Permissions
+ (*tabletmanagerdata.SchemaDefinition)(nil), // 218: tabletmanagerdata.SchemaDefinition
+ (*vschema.SrvVSchema)(nil), // 219: vschema.SrvVSchema
+ (*topodata.ShardReplicationError)(nil), // 220: topodata.ShardReplicationError
+ (*topodata.KeyRange)(nil), // 221: topodata.KeyRange
+ (*topodata.CellsAlias)(nil), // 222: topodata.CellsAlias
+ (*topodata.Shard_TabletControl)(nil), // 223: topodata.Shard.TabletControl
+ (*binlogdata.BinlogSource)(nil), // 224: binlogdata.BinlogSource
+ (*topodata.SrvKeyspace)(nil), // 225: topodata.SrvKeyspace
+ (*replicationdata.Status)(nil), // 226: replicationdata.Status
}
var file_vtctldata_proto_depIdxs = []int32{
- 192, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event
+ 197, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event
3, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings
0, // 2: vtctldata.MaterializeSettings.materialization_intent:type_name -> vtctldata.MaterializationIntent
- 193, // 3: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace
- 194, // 4: vtctldata.Shard.shard:type_name -> topodata.Shard
- 174, // 5: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation
- 174, // 6: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation
- 173, // 7: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry
- 195, // 8: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo
- 196, // 9: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules
- 197, // 10: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules
- 198, // 11: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 199, // 12: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID
- 200, // 13: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace
- 200, // 14: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace
- 201, // 15: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 16: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias
- 192, // 17: vtctldata.BackupResponse.event:type_name -> logutil.Event
- 201, // 18: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias
- 202, // 19: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType
- 203, // 20: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet
- 203, // 21: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet
- 204, // 22: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom
- 205, // 23: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType
- 206, // 24: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time
+ 198, // 3: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace
+ 199, // 4: vtctldata.Shard.shard:type_name -> topodata.Shard
+ 179, // 5: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation
+ 179, // 6: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation
+ 178, // 7: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry
+ 200, // 8: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo
+ 201, // 9: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules
+ 202, // 10: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules
+ 203, // 11: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 204, // 12: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID
+ 205, // 13: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace
+ 205, // 14: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace
+ 206, // 15: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 16: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 197, // 17: vtctldata.BackupResponse.event:type_name -> logutil.Event
+ 206, // 18: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 207, // 19: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType
+ 208, // 20: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet
+ 208, // 21: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet
+ 209, // 22: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom
+ 210, // 23: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType
+ 211, // 24: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time
5, // 25: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace
5, // 26: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace
6, // 27: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard
6, // 28: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard
- 201, // 29: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
- 201, // 30: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
- 201, // 31: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias
- 198, // 32: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 201, // 33: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 192, // 34: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event
- 201, // 35: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias
- 207, // 36: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult
- 201, // 37: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias
- 207, // 38: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult
- 201, // 39: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias
- 208, // 40: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest
- 209, // 41: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse
- 179, // 42: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
- 210, // 43: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo
- 195, // 44: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo
- 180, // 45: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry
- 201, // 46: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias
- 211, // 47: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus
- 5, // 48: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace
- 5, // 49: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace
- 201, // 50: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias
- 212, // 51: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions
- 196, // 52: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules
- 201, // 53: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
- 213, // 54: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition
- 6, // 55: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard
- 197, // 56: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules
- 181, // 57: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
- 183, // 58: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
- 214, // 59: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema
- 184, // 60: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
- 201, // 61: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
- 203, // 62: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet
- 201, // 63: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
- 202, // 64: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType
- 203, // 65: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet
- 201, // 66: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias
- 200, // 67: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace
- 7, // 68: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow
- 201, // 69: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias
- 198, // 70: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 192, // 71: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event
- 201, // 72: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 73: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
- 201, // 74: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias
- 198, // 75: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
- 201, // 76: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
- 192, // 77: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event
- 201, // 78: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 79: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
- 192, // 80: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event
- 192, // 81: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event
- 201, // 82: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias
- 201, // 83: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias
- 201, // 84: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias
- 206, // 85: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time
- 201, // 86: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias
- 192, // 87: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event
- 201, // 88: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias
- 193, // 89: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace
- 202, // 90: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType
- 193, // 91: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace
- 193, // 92: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace
- 194, // 93: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard
- 202, // 94: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType
- 194, // 95: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard
- 201, // 96: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 97: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias
- 215, // 98: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError
- 185, // 99: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
- 186, // 100: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
- 201, // 101: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 102: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
- 198, // 103: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration
- 216, // 104: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange
- 194, // 105: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard
- 194, // 106: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard
- 201, // 107: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 108: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
- 201, // 109: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias
- 201, // 110: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias
- 201, // 111: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias
- 195, // 112: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo
- 195, // 113: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo
- 217, // 114: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias
- 217, // 115: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias
- 187, // 116: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry
- 188, // 117: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
- 189, // 118: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
- 190, // 119: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
- 191, // 120: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
- 175, // 121: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream
- 176, // 122: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream
- 218, // 123: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl
- 201, // 124: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias
- 219, // 125: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource
- 206, // 126: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time
- 206, // 127: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time
- 177, // 128: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState
- 178, // 129: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log
- 206, // 130: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time
- 206, // 131: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time
- 6, // 132: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard
- 217, // 133: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias
- 182, // 134: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList
- 220, // 135: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace
- 214, // 136: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema
- 221, // 137: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status
- 203, // 138: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet
- 164, // 139: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse
- 168, // 140: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 168, // 141: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 168, // 142: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 168, // 143: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
- 144, // [144:144] is the sub-list for method output_type
- 144, // [144:144] is the sub-list for method input_type
- 144, // [144:144] is the sub-list for extension type_name
- 144, // [144:144] is the sub-list for extension extendee
- 0, // [0:144] is the sub-list for field type_name
+ 206, // 29: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
+ 206, // 30: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
+ 206, // 31: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias
+ 203, // 32: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 206, // 33: vtctldata.EmergencyReparentShardRequest.expected_primary:type_name -> topodata.TabletAlias
+ 206, // 34: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 197, // 35: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event
+ 206, // 36: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 212, // 37: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult
+ 206, // 38: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias
+ 212, // 39: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult
+ 206, // 40: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 213, // 41: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest
+ 214, // 42: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse
+ 184, // 43: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry
+ 215, // 44: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo
+ 200, // 45: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo
+ 185, // 46: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry
+ 206, // 47: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 216, // 48: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus
+ 5, // 49: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace
+ 5, // 50: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace
+ 206, // 51: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 217, // 52: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions
+ 201, // 53: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules
+ 206, // 54: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 218, // 55: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition
+ 6, // 56: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard
+ 202, // 57: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules
+ 186, // 58: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry
+ 188, // 59: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry
+ 219, // 60: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema
+ 189, // 61: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry
+ 206, // 62: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 208, // 63: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet
+ 206, // 64: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias
+ 207, // 65: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType
+ 208, // 66: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet
+ 89, // 67: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell
+ 206, // 68: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 205, // 69: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace
+ 7, // 70: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow
+ 206, // 71: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias
+ 203, // 72: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 197, // 73: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event
+ 206, // 74: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 75: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias
+ 206, // 76: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias
+ 203, // 77: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration
+ 206, // 78: vtctldata.PlannedReparentShardRequest.expected_primary:type_name -> topodata.TabletAlias
+ 206, // 79: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias
+ 197, // 80: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event
+ 206, // 81: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 82: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 197, // 83: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event
+ 197, // 84: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event
+ 206, // 85: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias
+ 206, // 86: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias
+ 206, // 87: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 211, // 88: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time
+ 206, // 89: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias
+ 197, // 90: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event
+ 206, // 91: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 198, // 92: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace
+ 207, // 93: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType
+ 198, // 94: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace
+ 198, // 95: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace
+ 199, // 96: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard
+ 207, // 97: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType
+ 199, // 98: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard
+ 206, // 99: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 100: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 220, // 101: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError
+ 190, // 102: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry
+ 191, // 103: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry
+ 206, // 104: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 105: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 203, // 106: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration
+ 221, // 107: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange
+ 199, // 108: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard
+ 199, // 109: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard
+ 206, // 110: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 111: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias
+ 206, // 112: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias
+ 206, // 113: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias
+ 206, // 114: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias
+ 200, // 115: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo
+ 200, // 116: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo
+ 222, // 117: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias
+ 222, // 118: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias
+ 192, // 119: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry
+ 193, // 120: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry
+ 194, // 121: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry
+ 195, // 122: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry
+ 196, // 123: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry
+ 180, // 124: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream
+ 181, // 125: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream
+ 223, // 126: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl
+ 206, // 127: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias
+ 224, // 128: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource
+ 211, // 129: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time
+ 211, // 130: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time
+ 182, // 131: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState
+ 183, // 132: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log
+ 211, // 133: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time
+ 211, // 134: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time
+ 6, // 135: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard
+ 222, // 136: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias
+ 187, // 137: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList
+ 225, // 138: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace
+ 219, // 139: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema
+ 226, // 140: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status
+ 208, // 141: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet
+ 167, // 142: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse
+ 171, // 143: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 171, // 144: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 171, // 145: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 171, // 146: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse
+ 147, // [147:147] is the sub-list for method output_type
+ 147, // [147:147] is the sub-list for method input_type
+ 147, // [147:147] is the sub-list for extension type_name
+ 147, // [147:147] is the sub-list for extension extendee
+ 0, // [0:147] is the sub-list for field type_name
}
func init() { file_vtctldata_proto_init() }
@@ -12979,7 +13332,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemaRequest); i {
+ switch v := v.(*GetTopologyPathRequest); i {
case 0:
return &v.state
case 1:
@@ -12991,7 +13344,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVersionRequest); i {
+ switch v := v.(*GetTopologyPathResponse); i {
case 0:
return &v.state
case 1:
@@ -13003,7 +13356,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVersionResponse); i {
+ switch v := v.(*TopologyCell); i {
case 0:
return &v.state
case 1:
@@ -13015,7 +13368,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetVSchemaResponse); i {
+ switch v := v.(*GetVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -13027,7 +13380,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowsRequest); i {
+ switch v := v.(*GetVersionRequest); i {
case 0:
return &v.state
case 1:
@@ -13039,7 +13392,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetWorkflowsResponse); i {
+ switch v := v.(*GetVersionResponse); i {
case 0:
return &v.state
case 1:
@@ -13051,7 +13404,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InitShardPrimaryRequest); i {
+ switch v := v.(*GetVSchemaResponse); i {
case 0:
return &v.state
case 1:
@@ -13063,7 +13416,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InitShardPrimaryResponse); i {
+ switch v := v.(*GetWorkflowsRequest); i {
case 0:
return &v.state
case 1:
@@ -13075,7 +13428,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PingTabletRequest); i {
+ switch v := v.(*GetWorkflowsResponse); i {
case 0:
return &v.state
case 1:
@@ -13087,7 +13440,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PingTabletResponse); i {
+ switch v := v.(*InitShardPrimaryRequest); i {
case 0:
return &v.state
case 1:
@@ -13099,7 +13452,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlannedReparentShardRequest); i {
+ switch v := v.(*InitShardPrimaryResponse); i {
case 0:
return &v.state
case 1:
@@ -13111,7 +13464,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlannedReparentShardResponse); i {
+ switch v := v.(*PingTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -13123,7 +13476,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildKeyspaceGraphRequest); i {
+ switch v := v.(*PingTabletResponse); i {
case 0:
return &v.state
case 1:
@@ -13135,7 +13488,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildKeyspaceGraphResponse); i {
+ switch v := v.(*PlannedReparentShardRequest); i {
case 0:
return &v.state
case 1:
@@ -13147,7 +13500,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildVSchemaGraphRequest); i {
+ switch v := v.(*PlannedReparentShardResponse); i {
case 0:
return &v.state
case 1:
@@ -13159,7 +13512,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RebuildVSchemaGraphResponse); i {
+ switch v := v.(*RebuildKeyspaceGraphRequest); i {
case 0:
return &v.state
case 1:
@@ -13171,7 +13524,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateRequest); i {
+ switch v := v.(*RebuildKeyspaceGraphResponse); i {
case 0:
return &v.state
case 1:
@@ -13183,7 +13536,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateResponse); i {
+ switch v := v.(*RebuildVSchemaGraphRequest); i {
case 0:
return &v.state
case 1:
@@ -13195,7 +13548,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateByShardRequest); i {
+ switch v := v.(*RebuildVSchemaGraphResponse); i {
case 0:
return &v.state
case 1:
@@ -13207,7 +13560,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RefreshStateByShardResponse); i {
+ switch v := v.(*RefreshStateRequest); i {
case 0:
return &v.state
case 1:
@@ -13219,7 +13572,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaRequest); i {
+ switch v := v.(*RefreshStateResponse); i {
case 0:
return &v.state
case 1:
@@ -13231,7 +13584,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaResponse); i {
+ switch v := v.(*RefreshStateByShardRequest); i {
case 0:
return &v.state
case 1:
@@ -13243,7 +13596,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaKeyspaceRequest); i {
+ switch v := v.(*RefreshStateByShardResponse); i {
case 0:
return &v.state
case 1:
@@ -13255,7 +13608,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaKeyspaceResponse); i {
+ switch v := v.(*ReloadSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -13267,7 +13620,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaShardRequest); i {
+ switch v := v.(*ReloadSchemaResponse); i {
case 0:
return &v.state
case 1:
@@ -13279,7 +13632,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReloadSchemaShardResponse); i {
+ switch v := v.(*ReloadSchemaKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -13291,7 +13644,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveBackupRequest); i {
+ switch v := v.(*ReloadSchemaKeyspaceResponse); i {
case 0:
return &v.state
case 1:
@@ -13303,7 +13656,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveBackupResponse); i {
+ switch v := v.(*ReloadSchemaShardRequest); i {
case 0:
return &v.state
case 1:
@@ -13315,7 +13668,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveKeyspaceCellRequest); i {
+ switch v := v.(*ReloadSchemaShardResponse); i {
case 0:
return &v.state
case 1:
@@ -13327,7 +13680,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveKeyspaceCellResponse); i {
+ switch v := v.(*RemoveBackupRequest); i {
case 0:
return &v.state
case 1:
@@ -13339,7 +13692,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveShardCellRequest); i {
+ switch v := v.(*RemoveBackupResponse); i {
case 0:
return &v.state
case 1:
@@ -13351,7 +13704,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RemoveShardCellResponse); i {
+ switch v := v.(*RemoveKeyspaceCellRequest); i {
case 0:
return &v.state
case 1:
@@ -13363,7 +13716,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReparentTabletRequest); i {
+ switch v := v.(*RemoveKeyspaceCellResponse); i {
case 0:
return &v.state
case 1:
@@ -13375,7 +13728,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReparentTabletResponse); i {
+ switch v := v.(*RemoveShardCellRequest); i {
case 0:
return &v.state
case 1:
@@ -13387,7 +13740,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreFromBackupRequest); i {
+ switch v := v.(*RemoveShardCellResponse); i {
case 0:
return &v.state
case 1:
@@ -13399,7 +13752,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreFromBackupResponse); i {
+ switch v := v.(*ReparentTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -13411,7 +13764,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RunHealthCheckRequest); i {
+ switch v := v.(*ReparentTabletResponse); i {
case 0:
return &v.state
case 1:
@@ -13423,7 +13776,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RunHealthCheckResponse); i {
+ switch v := v.(*RestoreFromBackupRequest); i {
case 0:
return &v.state
case 1:
@@ -13435,7 +13788,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i {
+ switch v := v.(*RestoreFromBackupResponse); i {
case 0:
return &v.state
case 1:
@@ -13447,7 +13800,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i {
+ switch v := v.(*RunHealthCheckRequest); i {
case 0:
return &v.state
case 1:
@@ -13459,7 +13812,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetKeyspaceServedFromRequest); i {
+ switch v := v.(*RunHealthCheckResponse); i {
case 0:
return &v.state
case 1:
@@ -13471,7 +13824,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetKeyspaceServedFromResponse); i {
+ switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i {
case 0:
return &v.state
case 1:
@@ -13483,7 +13836,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetKeyspaceShardingInfoRequest); i {
+ switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i {
case 0:
return &v.state
case 1:
@@ -13495,7 +13848,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetKeyspaceShardingInfoResponse); i {
+ switch v := v.(*SetKeyspaceServedFromRequest); i {
case 0:
return &v.state
case 1:
@@ -13507,7 +13860,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardIsPrimaryServingRequest); i {
+ switch v := v.(*SetKeyspaceServedFromResponse); i {
case 0:
return &v.state
case 1:
@@ -13519,7 +13872,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardIsPrimaryServingResponse); i {
+ switch v := v.(*SetKeyspaceShardingInfoRequest); i {
case 0:
return &v.state
case 1:
@@ -13531,7 +13884,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardTabletControlRequest); i {
+ switch v := v.(*SetKeyspaceShardingInfoResponse); i {
case 0:
return &v.state
case 1:
@@ -13543,7 +13896,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardTabletControlResponse); i {
+ switch v := v.(*SetShardIsPrimaryServingRequest); i {
case 0:
return &v.state
case 1:
@@ -13555,7 +13908,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetWritableRequest); i {
+ switch v := v.(*SetShardIsPrimaryServingResponse); i {
case 0:
return &v.state
case 1:
@@ -13567,7 +13920,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetWritableResponse); i {
+ switch v := v.(*SetShardTabletControlRequest); i {
case 0:
return &v.state
case 1:
@@ -13579,7 +13932,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationAddRequest); i {
+ switch v := v.(*SetShardTabletControlResponse); i {
case 0:
return &v.state
case 1:
@@ -13591,7 +13944,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationAddResponse); i {
+ switch v := v.(*SetWritableRequest); i {
case 0:
return &v.state
case 1:
@@ -13603,7 +13956,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationFixRequest); i {
+ switch v := v.(*SetWritableResponse); i {
case 0:
return &v.state
case 1:
@@ -13615,7 +13968,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationFixResponse); i {
+ switch v := v.(*ShardReplicationAddRequest); i {
case 0:
return &v.state
case 1:
@@ -13627,7 +13980,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationPositionsRequest); i {
+ switch v := v.(*ShardReplicationAddResponse); i {
case 0:
return &v.state
case 1:
@@ -13639,7 +13992,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationPositionsResponse); i {
+ switch v := v.(*ShardReplicationFixRequest); i {
case 0:
return &v.state
case 1:
@@ -13651,7 +14004,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationRemoveRequest); i {
+ switch v := v.(*ShardReplicationFixResponse); i {
case 0:
return &v.state
case 1:
@@ -13663,7 +14016,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShardReplicationRemoveResponse); i {
+ switch v := v.(*ShardReplicationPositionsRequest); i {
case 0:
return &v.state
case 1:
@@ -13675,7 +14028,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SleepTabletRequest); i {
+ switch v := v.(*ShardReplicationPositionsResponse); i {
case 0:
return &v.state
case 1:
@@ -13687,7 +14040,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SleepTabletResponse); i {
+ switch v := v.(*ShardReplicationRemoveRequest); i {
case 0:
return &v.state
case 1:
@@ -13699,7 +14052,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceShardAddRequest); i {
+ switch v := v.(*ShardReplicationRemoveResponse); i {
case 0:
return &v.state
case 1:
@@ -13711,7 +14064,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceShardAddResponse); i {
+ switch v := v.(*SleepTabletRequest); i {
case 0:
return &v.state
case 1:
@@ -13723,7 +14076,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceShardDeleteRequest); i {
+ switch v := v.(*SleepTabletResponse); i {
case 0:
return &v.state
case 1:
@@ -13735,7 +14088,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceShardDeleteResponse); i {
+ switch v := v.(*SourceShardAddRequest); i {
case 0:
return &v.state
case 1:
@@ -13747,7 +14100,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartReplicationRequest); i {
+ switch v := v.(*SourceShardAddResponse); i {
case 0:
return &v.state
case 1:
@@ -13759,7 +14112,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartReplicationResponse); i {
+ switch v := v.(*SourceShardDeleteRequest); i {
case 0:
return &v.state
case 1:
@@ -13771,7 +14124,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopReplicationRequest); i {
+ switch v := v.(*SourceShardDeleteResponse); i {
case 0:
return &v.state
case 1:
@@ -13783,7 +14136,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StopReplicationResponse); i {
+ switch v := v.(*StartReplicationRequest); i {
case 0:
return &v.state
case 1:
@@ -13795,7 +14148,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyReparentedRequest); i {
+ switch v := v.(*StartReplicationResponse); i {
case 0:
return &v.state
case 1:
@@ -13807,7 +14160,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TabletExternallyReparentedResponse); i {
+ switch v := v.(*StopReplicationRequest); i {
case 0:
return &v.state
case 1:
@@ -13819,7 +14172,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCellInfoRequest); i {
+ switch v := v.(*StopReplicationResponse); i {
case 0:
return &v.state
case 1:
@@ -13831,7 +14184,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCellInfoResponse); i {
+ switch v := v.(*TabletExternallyReparentedRequest); i {
case 0:
return &v.state
case 1:
@@ -13843,7 +14196,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCellsAliasRequest); i {
+ switch v := v.(*TabletExternallyReparentedResponse); i {
case 0:
return &v.state
case 1:
@@ -13855,7 +14208,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCellsAliasResponse); i {
+ switch v := v.(*UpdateCellInfoRequest); i {
case 0:
return &v.state
case 1:
@@ -13867,7 +14220,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateRequest); i {
+ switch v := v.(*UpdateCellInfoResponse); i {
case 0:
return &v.state
case 1:
@@ -13879,7 +14232,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateResponse); i {
+ switch v := v.(*UpdateCellsAliasRequest); i {
case 0:
return &v.state
case 1:
@@ -13891,7 +14244,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateKeyspaceRequest); i {
+ switch v := v.(*UpdateCellsAliasResponse); i {
case 0:
return &v.state
case 1:
@@ -13903,7 +14256,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateKeyspaceResponse); i {
+ switch v := v.(*ValidateRequest); i {
case 0:
return &v.state
case 1:
@@ -13915,7 +14268,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateSchemaKeyspaceRequest); i {
+ switch v := v.(*ValidateResponse); i {
case 0:
return &v.state
case 1:
@@ -13927,7 +14280,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateSchemaKeyspaceResponse); i {
+ switch v := v.(*ValidateKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -13939,7 +14292,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateShardRequest); i {
+ switch v := v.(*ValidateKeyspaceResponse); i {
case 0:
return &v.state
case 1:
@@ -13951,7 +14304,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateShardResponse); i {
+ switch v := v.(*ValidateSchemaKeyspaceRequest); i {
case 0:
return &v.state
case 1:
@@ -13963,7 +14316,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateVersionKeyspaceRequest); i {
+ switch v := v.(*ValidateSchemaKeyspaceResponse); i {
case 0:
return &v.state
case 1:
@@ -13975,7 +14328,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateVersionKeyspaceResponse); i {
+ switch v := v.(*ValidateShardRequest); i {
case 0:
return &v.state
case 1:
@@ -13987,7 +14340,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateVSchemaRequest); i {
+ switch v := v.(*ValidateShardResponse); i {
case 0:
return &v.state
case 1:
@@ -13999,7 +14352,19 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateVSchemaResponse); i {
+ switch v := v.(*ValidateVersionKeyspaceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVersionKeyspaceResponse); i {
case 0:
return &v.state
case 1:
@@ -14011,7 +14376,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Workflow_ReplicationLocation); i {
+ switch v := v.(*ValidateVersionShardRequest); i {
case 0:
return &v.state
case 1:
@@ -14023,7 +14388,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Workflow_ShardStream); i {
+ switch v := v.(*ValidateVersionShardResponse); i {
case 0:
return &v.state
case 1:
@@ -14035,7 +14400,7 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Workflow_Stream); i {
+ switch v := v.(*ValidateVSchemaRequest); i {
case 0:
return &v.state
case 1:
@@ -14047,6 +14412,54 @@ func file_vtctldata_proto_init() {
}
}
file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ValidateVSchemaResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Workflow_ReplicationLocation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Workflow_ShardStream); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Workflow_Stream); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_Stream_CopyState); i {
case 0:
return &v.state
@@ -14058,7 +14471,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Workflow_Stream_Log); i {
case 0:
return &v.state
@@ -14070,7 +14483,7 @@ func file_vtctldata_proto_init() {
return nil
}
}
- file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} {
+ file_vtctldata_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i {
case 0:
return &v.state
@@ -14083,13 +14496,14 @@ func file_vtctldata_proto_init() {
}
}
}
+ file_vtctldata_proto_msgTypes[19].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vtctldata_proto_rawDesc,
NumEnums: 1,
- NumMessages: 191,
+ NumMessages: 196,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go
index 08943dd5ac5..143b9bd0f70 100644
--- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go
+++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go
@@ -1571,6 +1571,13 @@ func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.BackupEngine != nil {
+ i -= len(*m.BackupEngine)
+ copy(dAtA[i:], *m.BackupEngine)
+ i = encodeVarint(dAtA, i, uint64(len(*m.BackupEngine)))
+ i--
+ dAtA[i] = 0x32
+ }
if m.Concurrency != 0 {
i = encodeVarint(dAtA, i, uint64(m.Concurrency))
i--
@@ -2671,6 +2678,16 @@ func (m *EmergencyReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.ExpectedPrimary != nil {
+ size, err := m.ExpectedPrimary.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ }
if m.PreventCrossCellPromotion {
i--
if m.PreventCrossCellPromotion {
@@ -4930,7 +4947,7 @@ func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
+func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -4943,12 +4960,12 @@ func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *GetVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -4960,17 +4977,17 @@ func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if len(m.Keyspace) > 0 {
- i -= len(m.Keyspace)
- copy(dAtA[i:], m.Keyspace)
- i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarint(dAtA, i, uint64(len(m.Path)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) {
+func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -4983,12 +5000,12 @@ func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *GetVersionRequest) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetTopologyPathResponse) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5000,8 +5017,8 @@ func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if m.TabletAlias != nil {
- size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i])
+ if m.Cell != nil {
+ size, err := m.Cell.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
return 0, err
}
@@ -5013,7 +5030,7 @@ func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) {
+func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5026,12 +5043,12 @@ func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *GetVersionResponse) MarshalToVT(dAtA []byte) (int, error) {
+func (m *TopologyCell) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5043,60 +5060,40 @@ func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarint(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) {
- if m == nil {
- return nil, nil
- }
- size := m.SizeVT()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBufferVT(dAtA[:size])
- if err != nil {
- return nil, err
+ if len(m.Children) > 0 {
+ for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Children[iNdEx])
+ copy(dAtA[i:], m.Children[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.Children[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
}
- return dAtA[:n], nil
-}
-
-func (m *GetVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) {
- size := m.SizeVT()
- return m.MarshalToSizedBufferVT(dAtA[:size])
-}
-
-func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
- if m == nil {
- return 0, nil
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarint(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x1a
}
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.unknownFields != nil {
- i -= len(m.unknownFields)
- copy(dAtA[i:], m.unknownFields)
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x12
}
- if m.VSchema != nil {
- size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarint(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) {
+func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5109,12 +5106,12 @@ func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *GetWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5126,16 +5123,6 @@ func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if m.ActiveOnly {
- i--
- if m.ActiveOnly {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
if len(m.Keyspace) > 0 {
i -= len(m.Keyspace)
copy(dAtA[i:], m.Keyspace)
@@ -5146,7 +5133,7 @@ func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) {
+func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5159,12 +5146,12 @@ func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *GetWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetVersionRequest) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5176,22 +5163,20 @@ func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error)
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if len(m.Workflows) > 0 {
- for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- {
- size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
- i--
- dAtA[i] = 0xa
+ if m.TabletAlias != nil {
+ size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) {
+func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5204,12 +5189,12 @@ func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *InitShardPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetVersionResponse) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5221,54 +5206,17 @@ func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if m.WaitReplicasTimeout != nil {
- size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
- i--
- dAtA[i] = 0x2a
- }
- if m.Force {
- i--
- if m.Force {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.PrimaryElectTabletAlias != nil {
- size, err := m.PrimaryElectTabletAlias.MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Shard) > 0 {
- i -= len(m.Shard)
- copy(dAtA[i:], m.Shard)
- i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Keyspace) > 0 {
- i -= len(m.Keyspace)
- copy(dAtA[i:], m.Keyspace)
- i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarint(dAtA, i, uint64(len(m.Version)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) {
+func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5281,12 +5229,12 @@ func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *InitShardPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5298,22 +5246,20 @@ func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if len(m.Events) > 0 {
- for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
- size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
- i--
- dAtA[i] = 0xa
+ if m.VSchema != nil {
+ size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) {
+func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5326,12 +5272,12 @@ func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *PingTabletRequest) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5343,20 +5289,27 @@ func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
- if m.TabletAlias != nil {
- size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.ActiveOnly {
+ i--
+ if m.ActiveOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) {
+func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5369,12 +5322,12 @@ func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *PingTabletResponse) MarshalToVT(dAtA []byte) (int, error) {
+func (m *GetWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5386,10 +5339,22 @@ func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.Workflows) > 0 {
+ for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
return len(dAtA) - i, nil
}
-func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) {
+func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
}
@@ -5402,12 +5367,12 @@ func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *PlannedReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+func (m *InitShardPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) {
size := m.SizeVT()
return m.MarshalToSizedBufferVT(dAtA[:size])
}
-func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
if m == nil {
return 0, nil
}
@@ -5429,18 +5394,226 @@ func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int,
i--
dAtA[i] = 0x2a
}
- if m.AvoidPrimary != nil {
- size, err := m.AvoidPrimary.MarshalToSizedBufferVT(dAtA[:i])
- if err != nil {
- return 0, err
+ if m.Force {
+ i--
+ if m.Force {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
- i -= size
- i = encodeVarint(dAtA, i, uint64(size))
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0x20
}
- if m.NewPrimary != nil {
- size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i])
+ if m.PrimaryElectTabletAlias != nil {
+ size, err := m.PrimaryElectTabletAlias.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InitShardPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Events) > 0 {
+ for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PingTabletRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.TabletAlias != nil {
+ size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PingTabletResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PlannedReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ExpectedPrimary != nil {
+ size, err := m.ExpectedPrimary.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.WaitReplicasTimeout != nil {
+ size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.AvoidPrimary != nil {
+ size, err := m.AvoidPrimary.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.NewPrimary != nil {
+ size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
return 0, err
}
@@ -6605,6 +6778,15 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.AllowedBackupEngines) > 0 {
+ for iNdEx := len(m.AllowedBackupEngines) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AllowedBackupEngines[iNdEx])
+ copy(dAtA[i:], m.AllowedBackupEngines[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.AllowedBackupEngines[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
if m.BackupTime != nil {
size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
@@ -9126,6 +9308,95 @@ func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (i
return len(dAtA) - i, nil
}
+func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Shard) > 0 {
+ i -= len(m.Shard)
+ copy(dAtA[i:], m.Shard)
+ i = encodeVarint(dAtA, i, uint64(len(m.Shard)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Keyspace) > 0 {
+ i -= len(m.Keyspace)
+ copy(dAtA[i:], m.Keyspace)
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidateVersionShardResponse) MarshalToVT(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Results) > 0 {
+ for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Results[iNdEx])
+ copy(dAtA[i:], m.Results[iNdEx])
+ i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) {
if m == nil {
return nil, nil
@@ -9939,6 +10210,10 @@ func (m *BackupRequest) SizeVT() (n int) {
if m.Concurrency != 0 {
n += 1 + sov(uint64(m.Concurrency))
}
+ if m.BackupEngine != nil {
+ l = len(*m.BackupEngine)
+ n += 1 + l + sov(uint64(l))
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -10376,6 +10651,10 @@ func (m *EmergencyReparentShardRequest) SizeVT() (n int) {
if m.PreventCrossCellPromotion {
n += 2
}
+ if m.ExpectedPrimary != nil {
+ l = m.ExpectedPrimary.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -11267,6 +11546,68 @@ func (m *GetTabletsResponse) SizeVT() (n int) {
return n
}
+func (m *GetTopologyPathRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
+func (m *GetTopologyPathResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Cell != nil {
+ l = m.Cell.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
+func (m *TopologyCell) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if len(m.Children) > 0 {
+ for _, s := range m.Children {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
func (m *GetVSchemaRequest) SizeVT() (n int) {
if m == nil {
return 0
@@ -11471,6 +11812,10 @@ func (m *PlannedReparentShardRequest) SizeVT() (n int) {
l = m.WaitReplicasTimeout.SizeVT()
n += 1 + l + sov(uint64(l))
}
+ if m.ExpectedPrimary != nil {
+ l = m.ExpectedPrimary.SizeVT()
+ n += 1 + l + sov(uint64(l))
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -11937,6 +12282,12 @@ func (m *RestoreFromBackupRequest) SizeVT() (n int) {
l = m.BackupTime.SizeVT()
n += 1 + l + sov(uint64(l))
}
+ if len(m.AllowedBackupEngines) > 0 {
+ for _, s := range m.AllowedBackupEngines {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -12969,6 +13320,44 @@ func (m *ValidateVersionKeyspaceResponse) SizeVT() (n int) {
return n
}
+func (m *ValidateVersionShardRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Keyspace)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.Shard)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
+func (m *ValidateVersionShardResponse) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Results) > 0 {
+ for _, s := range m.Results {
+ l = len(s)
+ n += 1 + l + sov(uint64(l))
+ }
+ }
+ if m.unknownFields != nil {
+ n += len(m.unknownFields)
+ }
+ return n
+}
+
func (m *ValidateVSchemaRequest) SizeVT() (n int) {
if m == nil {
return 0
@@ -17077,128 +17466,9 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error {
break
}
}
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BackupResponse) UnmarshalVT(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLength
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLength
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.TabletAlias == nil {
- m.TabletAlias = &topodata.TabletAlias{}
- }
- if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLength
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLength
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Keyspace = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field BackupEngine", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -17226,43 +17496,195 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Shard = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLength
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLength
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Event == nil {
- m.Event = &logutil.Event{}
- }
- if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.BackupEngine = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BackupResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TabletAlias == nil {
+ m.TabletAlias = &topodata.TabletAlias{}
+ }
+ if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Event == nil {
+ m.Event = &logutil.Event{}
+ }
+ if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -19537,124 +19959,160 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error {
}
}
m.PreventCrossCellPromotion = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLength
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLength
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Keyspace = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLength
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLength
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Shard = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
+ case 8:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPrimary", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExpectedPrimary == nil {
+ m.ExpectedPrimary = &topodata.TabletAlias{}
+ }
+ if err := m.ExpectedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -24067,15 +24525,409 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TabletAlias == nil {
+ m.TabletAlias = &topodata.TabletAlias{}
+ }
+ if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Tablet == nil {
+ m.Tablet = &topodata.Tablet{}
+ }
+ if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Strict = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{})
+ if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType)
+ }
+ m.TabletType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TabletType |= topodata.TabletType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -24102,10 +24954,8 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.TabletAlias == nil {
- m.TabletAlias = &topodata.TabletAlias{}
- }
- if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ m.Tablets = append(m.Tablets, &topodata.Tablet{})
+ if err := m.Tablets[len(m.Tablets)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -24131,7 +24981,7 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error {
+func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24154,17 +25004,17 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -24174,27 +25024,23 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Tablet == nil {
- m.Tablet = &topodata.Tablet{}
- }
- if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -24218,7 +25064,7 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
-func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
+func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -24241,17 +25087,17 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: GetTopologyPathResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: GetTopologyPathResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -24261,27 +25107,82 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Keyspace = string(dAtA[iNdEx:postIndex])
+ if m.Cell == nil {
+ m.Cell = &TopologyCell{}
+ }
+ if err := m.Cell.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TopologyCell) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TopologyCell: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TopologyCell: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -24309,11 +25210,11 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Shard = string(dAtA[iNdEx:postIndex])
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 3:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -24341,33 +25242,13 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex]))
+ m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Strict = bool(v != 0)
- case 5:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -24377,101 +25258,29 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{})
- if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Data = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType)
- }
- m.TabletType = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TabletType |= topodata.TabletType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skip(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLength
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflow
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -24481,25 +25290,23 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Tablets = append(m.Tablets, &topodata.Tablet{})
- if err := m.Tablets[len(m.Tablets)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Children = append(m.Children, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -25540,13 +26347,45 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Keyspace = string(dAtA[iNdEx:postIndex])
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
@@ -25556,27 +26395,31 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLength
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Shard = string(dAtA[iNdEx:postIndex])
+ if m.NewPrimary == nil {
+ m.NewPrimary = &topodata.TabletAlias{}
+ }
+ if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 3:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -25603,16 +26446,16 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NewPrimary == nil {
- m.NewPrimary = &topodata.TabletAlias{}
+ if m.AvoidPrimary == nil {
+ m.AvoidPrimary = &topodata.TabletAlias{}
}
- if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.AvoidPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 4:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -25639,16 +26482,16 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.AvoidPrimary == nil {
- m.AvoidPrimary = &topodata.TabletAlias{}
+ if m.WaitReplicasTimeout == nil {
+ m.WaitReplicasTimeout = &vttime.Duration{}
}
- if err := m.AvoidPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 5:
+ case 8:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPrimary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -25675,10 +26518,10 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.WaitReplicasTimeout == nil {
- m.WaitReplicasTimeout = &vttime.Duration{}
+ if m.ExpectedPrimary == nil {
+ m.ExpectedPrimary = &topodata.TabletAlias{}
}
- if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ExpectedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -28226,6 +29069,38 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowedBackupEngines", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AllowedBackupEngines = append(m.AllowedBackupEngines, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
@@ -34277,6 +35152,204 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error {
}
return nil
}
+func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keyspace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Shard = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidateVersionShardResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidateVersionShardResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Results = append(m.Results, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skip(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLength
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go
index 5981248bde7..fab6c3b40f7 100644
--- a/go/vt/proto/vtctlservice/vtctlservice.pb.go
+++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go
@@ -51,7 +51,7 @@ var file_vtctlservice_proto_rawDesc = []byte{
0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63,
0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74,
0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xdc, 0x3a, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c,
+ 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xa3, 0x3c, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c,
0x64, 0x12, 0x4e, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f,
0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x64, 0x64,
0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
@@ -273,258 +273,271 @@ var file_vtctlservice_proto_rawDesc = []byte{
0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b,
- 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47,
- 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d,
- 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49,
+ 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50,
+ 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a,
+ 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65,
+ 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f,
+ 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e,
+ 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49,
0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a,
- 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c,
- 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50,
- 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c,
- 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65,
- 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
- 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69,
- 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72,
- 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52,
- 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
- 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26,
- 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61,
- 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, 0x6e,
+ 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65,
+ 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e,
+ 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
- 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c,
- 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52,
- 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21,
+ 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65,
+ 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61,
+ 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13,
+ 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72,
+ 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72,
+ 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65,
+ 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x25,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65,
+ 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+ 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f,
+ 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f,
+ 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a,
+ 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12,
+ 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f,
+ 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f,
+ 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65,
- 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72,
- 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
- 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a,
- 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61,
- 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a,
- 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73,
- 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54,
+ 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01,
+ 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x74,
+ 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69,
+ 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44,
+ 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d,
- 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c,
- 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65,
- 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74,
- 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74,
- 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
- 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
- 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x6c, 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61,
+ 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53,
+ 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e,
+ 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1d, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66,
+ 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x12, 0x25, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52,
+ 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78,
+ 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
+ 0x76, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76,
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d,
- 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a,
- 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76,
- 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a,
- 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12,
- 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x6c, 0x65,
+ 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, 0x2e, 0x76, 0x74,
0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72,
- 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76,
+ 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76,
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61,
- 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63,
- 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
- 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74,
- 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65,
- 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
- 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
- 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e,
- 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e,
+ 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74,
+ 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43,
+ 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74,
+ 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c,
+ 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e,
0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
- 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
- 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f,
- 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
- 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74,
- 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, 0x76,
+ 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76,
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
- 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b,
+ 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29,
+ 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x56,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1f, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x00, 0x12, 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76,
+ 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e,
+ 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74,
+ 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29,
+ 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63,
+ 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
}
var file_vtctlservice_proto_goTypes = []interface{}{
@@ -569,128 +582,132 @@ var file_vtctlservice_proto_goTypes = []interface{}{
(*vtctldata.GetSrvVSchemasRequest)(nil), // 38: vtctldata.GetSrvVSchemasRequest
(*vtctldata.GetTabletRequest)(nil), // 39: vtctldata.GetTabletRequest
(*vtctldata.GetTabletsRequest)(nil), // 40: vtctldata.GetTabletsRequest
- (*vtctldata.GetVersionRequest)(nil), // 41: vtctldata.GetVersionRequest
- (*vtctldata.GetVSchemaRequest)(nil), // 42: vtctldata.GetVSchemaRequest
- (*vtctldata.GetWorkflowsRequest)(nil), // 43: vtctldata.GetWorkflowsRequest
- (*vtctldata.InitShardPrimaryRequest)(nil), // 44: vtctldata.InitShardPrimaryRequest
- (*vtctldata.PingTabletRequest)(nil), // 45: vtctldata.PingTabletRequest
- (*vtctldata.PlannedReparentShardRequest)(nil), // 46: vtctldata.PlannedReparentShardRequest
- (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 47: vtctldata.RebuildKeyspaceGraphRequest
- (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 48: vtctldata.RebuildVSchemaGraphRequest
- (*vtctldata.RefreshStateRequest)(nil), // 49: vtctldata.RefreshStateRequest
- (*vtctldata.RefreshStateByShardRequest)(nil), // 50: vtctldata.RefreshStateByShardRequest
- (*vtctldata.ReloadSchemaRequest)(nil), // 51: vtctldata.ReloadSchemaRequest
- (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 52: vtctldata.ReloadSchemaKeyspaceRequest
- (*vtctldata.ReloadSchemaShardRequest)(nil), // 53: vtctldata.ReloadSchemaShardRequest
- (*vtctldata.RemoveBackupRequest)(nil), // 54: vtctldata.RemoveBackupRequest
- (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 55: vtctldata.RemoveKeyspaceCellRequest
- (*vtctldata.RemoveShardCellRequest)(nil), // 56: vtctldata.RemoveShardCellRequest
- (*vtctldata.ReparentTabletRequest)(nil), // 57: vtctldata.ReparentTabletRequest
- (*vtctldata.RestoreFromBackupRequest)(nil), // 58: vtctldata.RestoreFromBackupRequest
- (*vtctldata.RunHealthCheckRequest)(nil), // 59: vtctldata.RunHealthCheckRequest
- (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 60: vtctldata.SetKeyspaceDurabilityPolicyRequest
- (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 61: vtctldata.SetShardIsPrimaryServingRequest
- (*vtctldata.SetShardTabletControlRequest)(nil), // 62: vtctldata.SetShardTabletControlRequest
- (*vtctldata.SetWritableRequest)(nil), // 63: vtctldata.SetWritableRequest
- (*vtctldata.ShardReplicationAddRequest)(nil), // 64: vtctldata.ShardReplicationAddRequest
- (*vtctldata.ShardReplicationFixRequest)(nil), // 65: vtctldata.ShardReplicationFixRequest
- (*vtctldata.ShardReplicationPositionsRequest)(nil), // 66: vtctldata.ShardReplicationPositionsRequest
- (*vtctldata.ShardReplicationRemoveRequest)(nil), // 67: vtctldata.ShardReplicationRemoveRequest
- (*vtctldata.SleepTabletRequest)(nil), // 68: vtctldata.SleepTabletRequest
- (*vtctldata.SourceShardAddRequest)(nil), // 69: vtctldata.SourceShardAddRequest
- (*vtctldata.SourceShardDeleteRequest)(nil), // 70: vtctldata.SourceShardDeleteRequest
- (*vtctldata.StartReplicationRequest)(nil), // 71: vtctldata.StartReplicationRequest
- (*vtctldata.StopReplicationRequest)(nil), // 72: vtctldata.StopReplicationRequest
- (*vtctldata.TabletExternallyReparentedRequest)(nil), // 73: vtctldata.TabletExternallyReparentedRequest
- (*vtctldata.UpdateCellInfoRequest)(nil), // 74: vtctldata.UpdateCellInfoRequest
- (*vtctldata.UpdateCellsAliasRequest)(nil), // 75: vtctldata.UpdateCellsAliasRequest
- (*vtctldata.ValidateRequest)(nil), // 76: vtctldata.ValidateRequest
- (*vtctldata.ValidateKeyspaceRequest)(nil), // 77: vtctldata.ValidateKeyspaceRequest
- (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 78: vtctldata.ValidateSchemaKeyspaceRequest
- (*vtctldata.ValidateShardRequest)(nil), // 79: vtctldata.ValidateShardRequest
- (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 80: vtctldata.ValidateVersionKeyspaceRequest
- (*vtctldata.ValidateVSchemaRequest)(nil), // 81: vtctldata.ValidateVSchemaRequest
- (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 82: vtctldata.ExecuteVtctlCommandResponse
- (*vtctldata.AddCellInfoResponse)(nil), // 83: vtctldata.AddCellInfoResponse
- (*vtctldata.AddCellsAliasResponse)(nil), // 84: vtctldata.AddCellsAliasResponse
- (*vtctldata.ApplyRoutingRulesResponse)(nil), // 85: vtctldata.ApplyRoutingRulesResponse
- (*vtctldata.ApplySchemaResponse)(nil), // 86: vtctldata.ApplySchemaResponse
- (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 87: vtctldata.ApplyShardRoutingRulesResponse
- (*vtctldata.ApplyVSchemaResponse)(nil), // 88: vtctldata.ApplyVSchemaResponse
- (*vtctldata.BackupResponse)(nil), // 89: vtctldata.BackupResponse
- (*vtctldata.ChangeTabletTypeResponse)(nil), // 90: vtctldata.ChangeTabletTypeResponse
- (*vtctldata.CreateKeyspaceResponse)(nil), // 91: vtctldata.CreateKeyspaceResponse
- (*vtctldata.CreateShardResponse)(nil), // 92: vtctldata.CreateShardResponse
- (*vtctldata.DeleteCellInfoResponse)(nil), // 93: vtctldata.DeleteCellInfoResponse
- (*vtctldata.DeleteCellsAliasResponse)(nil), // 94: vtctldata.DeleteCellsAliasResponse
- (*vtctldata.DeleteKeyspaceResponse)(nil), // 95: vtctldata.DeleteKeyspaceResponse
- (*vtctldata.DeleteShardsResponse)(nil), // 96: vtctldata.DeleteShardsResponse
- (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 97: vtctldata.DeleteSrvVSchemaResponse
- (*vtctldata.DeleteTabletsResponse)(nil), // 98: vtctldata.DeleteTabletsResponse
- (*vtctldata.EmergencyReparentShardResponse)(nil), // 99: vtctldata.EmergencyReparentShardResponse
- (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 100: vtctldata.ExecuteFetchAsAppResponse
- (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 101: vtctldata.ExecuteFetchAsDBAResponse
- (*vtctldata.ExecuteHookResponse)(nil), // 102: vtctldata.ExecuteHookResponse
- (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 103: vtctldata.FindAllShardsInKeyspaceResponse
- (*vtctldata.GetBackupsResponse)(nil), // 104: vtctldata.GetBackupsResponse
- (*vtctldata.GetCellInfoResponse)(nil), // 105: vtctldata.GetCellInfoResponse
- (*vtctldata.GetCellInfoNamesResponse)(nil), // 106: vtctldata.GetCellInfoNamesResponse
- (*vtctldata.GetCellsAliasesResponse)(nil), // 107: vtctldata.GetCellsAliasesResponse
- (*vtctldata.GetFullStatusResponse)(nil), // 108: vtctldata.GetFullStatusResponse
- (*vtctldata.GetKeyspaceResponse)(nil), // 109: vtctldata.GetKeyspaceResponse
- (*vtctldata.GetKeyspacesResponse)(nil), // 110: vtctldata.GetKeyspacesResponse
- (*vtctldata.GetPermissionsResponse)(nil), // 111: vtctldata.GetPermissionsResponse
- (*vtctldata.GetRoutingRulesResponse)(nil), // 112: vtctldata.GetRoutingRulesResponse
- (*vtctldata.GetSchemaResponse)(nil), // 113: vtctldata.GetSchemaResponse
- (*vtctldata.GetShardResponse)(nil), // 114: vtctldata.GetShardResponse
- (*vtctldata.GetShardRoutingRulesResponse)(nil), // 115: vtctldata.GetShardRoutingRulesResponse
- (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 116: vtctldata.GetSrvKeyspaceNamesResponse
- (*vtctldata.GetSrvKeyspacesResponse)(nil), // 117: vtctldata.GetSrvKeyspacesResponse
- (*vtctldata.GetSrvVSchemaResponse)(nil), // 118: vtctldata.GetSrvVSchemaResponse
- (*vtctldata.GetSrvVSchemasResponse)(nil), // 119: vtctldata.GetSrvVSchemasResponse
- (*vtctldata.GetTabletResponse)(nil), // 120: vtctldata.GetTabletResponse
- (*vtctldata.GetTabletsResponse)(nil), // 121: vtctldata.GetTabletsResponse
- (*vtctldata.GetVersionResponse)(nil), // 122: vtctldata.GetVersionResponse
- (*vtctldata.GetVSchemaResponse)(nil), // 123: vtctldata.GetVSchemaResponse
- (*vtctldata.GetWorkflowsResponse)(nil), // 124: vtctldata.GetWorkflowsResponse
- (*vtctldata.InitShardPrimaryResponse)(nil), // 125: vtctldata.InitShardPrimaryResponse
- (*vtctldata.PingTabletResponse)(nil), // 126: vtctldata.PingTabletResponse
- (*vtctldata.PlannedReparentShardResponse)(nil), // 127: vtctldata.PlannedReparentShardResponse
- (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 128: vtctldata.RebuildKeyspaceGraphResponse
- (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 129: vtctldata.RebuildVSchemaGraphResponse
- (*vtctldata.RefreshStateResponse)(nil), // 130: vtctldata.RefreshStateResponse
- (*vtctldata.RefreshStateByShardResponse)(nil), // 131: vtctldata.RefreshStateByShardResponse
- (*vtctldata.ReloadSchemaResponse)(nil), // 132: vtctldata.ReloadSchemaResponse
- (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 133: vtctldata.ReloadSchemaKeyspaceResponse
- (*vtctldata.ReloadSchemaShardResponse)(nil), // 134: vtctldata.ReloadSchemaShardResponse
- (*vtctldata.RemoveBackupResponse)(nil), // 135: vtctldata.RemoveBackupResponse
- (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 136: vtctldata.RemoveKeyspaceCellResponse
- (*vtctldata.RemoveShardCellResponse)(nil), // 137: vtctldata.RemoveShardCellResponse
- (*vtctldata.ReparentTabletResponse)(nil), // 138: vtctldata.ReparentTabletResponse
- (*vtctldata.RestoreFromBackupResponse)(nil), // 139: vtctldata.RestoreFromBackupResponse
- (*vtctldata.RunHealthCheckResponse)(nil), // 140: vtctldata.RunHealthCheckResponse
- (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 141: vtctldata.SetKeyspaceDurabilityPolicyResponse
- (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 142: vtctldata.SetShardIsPrimaryServingResponse
- (*vtctldata.SetShardTabletControlResponse)(nil), // 143: vtctldata.SetShardTabletControlResponse
- (*vtctldata.SetWritableResponse)(nil), // 144: vtctldata.SetWritableResponse
- (*vtctldata.ShardReplicationAddResponse)(nil), // 145: vtctldata.ShardReplicationAddResponse
- (*vtctldata.ShardReplicationFixResponse)(nil), // 146: vtctldata.ShardReplicationFixResponse
- (*vtctldata.ShardReplicationPositionsResponse)(nil), // 147: vtctldata.ShardReplicationPositionsResponse
- (*vtctldata.ShardReplicationRemoveResponse)(nil), // 148: vtctldata.ShardReplicationRemoveResponse
- (*vtctldata.SleepTabletResponse)(nil), // 149: vtctldata.SleepTabletResponse
- (*vtctldata.SourceShardAddResponse)(nil), // 150: vtctldata.SourceShardAddResponse
- (*vtctldata.SourceShardDeleteResponse)(nil), // 151: vtctldata.SourceShardDeleteResponse
- (*vtctldata.StartReplicationResponse)(nil), // 152: vtctldata.StartReplicationResponse
- (*vtctldata.StopReplicationResponse)(nil), // 153: vtctldata.StopReplicationResponse
- (*vtctldata.TabletExternallyReparentedResponse)(nil), // 154: vtctldata.TabletExternallyReparentedResponse
- (*vtctldata.UpdateCellInfoResponse)(nil), // 155: vtctldata.UpdateCellInfoResponse
- (*vtctldata.UpdateCellsAliasResponse)(nil), // 156: vtctldata.UpdateCellsAliasResponse
- (*vtctldata.ValidateResponse)(nil), // 157: vtctldata.ValidateResponse
- (*vtctldata.ValidateKeyspaceResponse)(nil), // 158: vtctldata.ValidateKeyspaceResponse
- (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 159: vtctldata.ValidateSchemaKeyspaceResponse
- (*vtctldata.ValidateShardResponse)(nil), // 160: vtctldata.ValidateShardResponse
- (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 161: vtctldata.ValidateVersionKeyspaceResponse
- (*vtctldata.ValidateVSchemaResponse)(nil), // 162: vtctldata.ValidateVSchemaResponse
+ (*vtctldata.GetTopologyPathRequest)(nil), // 41: vtctldata.GetTopologyPathRequest
+ (*vtctldata.GetVersionRequest)(nil), // 42: vtctldata.GetVersionRequest
+ (*vtctldata.GetVSchemaRequest)(nil), // 43: vtctldata.GetVSchemaRequest
+ (*vtctldata.GetWorkflowsRequest)(nil), // 44: vtctldata.GetWorkflowsRequest
+ (*vtctldata.InitShardPrimaryRequest)(nil), // 45: vtctldata.InitShardPrimaryRequest
+ (*vtctldata.PingTabletRequest)(nil), // 46: vtctldata.PingTabletRequest
+ (*vtctldata.PlannedReparentShardRequest)(nil), // 47: vtctldata.PlannedReparentShardRequest
+ (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 48: vtctldata.RebuildKeyspaceGraphRequest
+ (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 49: vtctldata.RebuildVSchemaGraphRequest
+ (*vtctldata.RefreshStateRequest)(nil), // 50: vtctldata.RefreshStateRequest
+ (*vtctldata.RefreshStateByShardRequest)(nil), // 51: vtctldata.RefreshStateByShardRequest
+ (*vtctldata.ReloadSchemaRequest)(nil), // 52: vtctldata.ReloadSchemaRequest
+ (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 53: vtctldata.ReloadSchemaKeyspaceRequest
+ (*vtctldata.ReloadSchemaShardRequest)(nil), // 54: vtctldata.ReloadSchemaShardRequest
+ (*vtctldata.RemoveBackupRequest)(nil), // 55: vtctldata.RemoveBackupRequest
+ (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 56: vtctldata.RemoveKeyspaceCellRequest
+ (*vtctldata.RemoveShardCellRequest)(nil), // 57: vtctldata.RemoveShardCellRequest
+ (*vtctldata.ReparentTabletRequest)(nil), // 58: vtctldata.ReparentTabletRequest
+ (*vtctldata.RestoreFromBackupRequest)(nil), // 59: vtctldata.RestoreFromBackupRequest
+ (*vtctldata.RunHealthCheckRequest)(nil), // 60: vtctldata.RunHealthCheckRequest
+ (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 61: vtctldata.SetKeyspaceDurabilityPolicyRequest
+ (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 62: vtctldata.SetShardIsPrimaryServingRequest
+ (*vtctldata.SetShardTabletControlRequest)(nil), // 63: vtctldata.SetShardTabletControlRequest
+ (*vtctldata.SetWritableRequest)(nil), // 64: vtctldata.SetWritableRequest
+ (*vtctldata.ShardReplicationAddRequest)(nil), // 65: vtctldata.ShardReplicationAddRequest
+ (*vtctldata.ShardReplicationFixRequest)(nil), // 66: vtctldata.ShardReplicationFixRequest
+ (*vtctldata.ShardReplicationPositionsRequest)(nil), // 67: vtctldata.ShardReplicationPositionsRequest
+ (*vtctldata.ShardReplicationRemoveRequest)(nil), // 68: vtctldata.ShardReplicationRemoveRequest
+ (*vtctldata.SleepTabletRequest)(nil), // 69: vtctldata.SleepTabletRequest
+ (*vtctldata.SourceShardAddRequest)(nil), // 70: vtctldata.SourceShardAddRequest
+ (*vtctldata.SourceShardDeleteRequest)(nil), // 71: vtctldata.SourceShardDeleteRequest
+ (*vtctldata.StartReplicationRequest)(nil), // 72: vtctldata.StartReplicationRequest
+ (*vtctldata.StopReplicationRequest)(nil), // 73: vtctldata.StopReplicationRequest
+ (*vtctldata.TabletExternallyReparentedRequest)(nil), // 74: vtctldata.TabletExternallyReparentedRequest
+ (*vtctldata.UpdateCellInfoRequest)(nil), // 75: vtctldata.UpdateCellInfoRequest
+ (*vtctldata.UpdateCellsAliasRequest)(nil), // 76: vtctldata.UpdateCellsAliasRequest
+ (*vtctldata.ValidateRequest)(nil), // 77: vtctldata.ValidateRequest
+ (*vtctldata.ValidateKeyspaceRequest)(nil), // 78: vtctldata.ValidateKeyspaceRequest
+ (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 79: vtctldata.ValidateSchemaKeyspaceRequest
+ (*vtctldata.ValidateShardRequest)(nil), // 80: vtctldata.ValidateShardRequest
+ (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 81: vtctldata.ValidateVersionKeyspaceRequest
+ (*vtctldata.ValidateVersionShardRequest)(nil), // 82: vtctldata.ValidateVersionShardRequest
+ (*vtctldata.ValidateVSchemaRequest)(nil), // 83: vtctldata.ValidateVSchemaRequest
+ (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 84: vtctldata.ExecuteVtctlCommandResponse
+ (*vtctldata.AddCellInfoResponse)(nil), // 85: vtctldata.AddCellInfoResponse
+ (*vtctldata.AddCellsAliasResponse)(nil), // 86: vtctldata.AddCellsAliasResponse
+ (*vtctldata.ApplyRoutingRulesResponse)(nil), // 87: vtctldata.ApplyRoutingRulesResponse
+ (*vtctldata.ApplySchemaResponse)(nil), // 88: vtctldata.ApplySchemaResponse
+ (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 89: vtctldata.ApplyShardRoutingRulesResponse
+ (*vtctldata.ApplyVSchemaResponse)(nil), // 90: vtctldata.ApplyVSchemaResponse
+ (*vtctldata.BackupResponse)(nil), // 91: vtctldata.BackupResponse
+ (*vtctldata.ChangeTabletTypeResponse)(nil), // 92: vtctldata.ChangeTabletTypeResponse
+ (*vtctldata.CreateKeyspaceResponse)(nil), // 93: vtctldata.CreateKeyspaceResponse
+ (*vtctldata.CreateShardResponse)(nil), // 94: vtctldata.CreateShardResponse
+ (*vtctldata.DeleteCellInfoResponse)(nil), // 95: vtctldata.DeleteCellInfoResponse
+ (*vtctldata.DeleteCellsAliasResponse)(nil), // 96: vtctldata.DeleteCellsAliasResponse
+ (*vtctldata.DeleteKeyspaceResponse)(nil), // 97: vtctldata.DeleteKeyspaceResponse
+ (*vtctldata.DeleteShardsResponse)(nil), // 98: vtctldata.DeleteShardsResponse
+ (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 99: vtctldata.DeleteSrvVSchemaResponse
+ (*vtctldata.DeleteTabletsResponse)(nil), // 100: vtctldata.DeleteTabletsResponse
+ (*vtctldata.EmergencyReparentShardResponse)(nil), // 101: vtctldata.EmergencyReparentShardResponse
+ (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 102: vtctldata.ExecuteFetchAsAppResponse
+ (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 103: vtctldata.ExecuteFetchAsDBAResponse
+ (*vtctldata.ExecuteHookResponse)(nil), // 104: vtctldata.ExecuteHookResponse
+ (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 105: vtctldata.FindAllShardsInKeyspaceResponse
+ (*vtctldata.GetBackupsResponse)(nil), // 106: vtctldata.GetBackupsResponse
+ (*vtctldata.GetCellInfoResponse)(nil), // 107: vtctldata.GetCellInfoResponse
+ (*vtctldata.GetCellInfoNamesResponse)(nil), // 108: vtctldata.GetCellInfoNamesResponse
+ (*vtctldata.GetCellsAliasesResponse)(nil), // 109: vtctldata.GetCellsAliasesResponse
+ (*vtctldata.GetFullStatusResponse)(nil), // 110: vtctldata.GetFullStatusResponse
+ (*vtctldata.GetKeyspaceResponse)(nil), // 111: vtctldata.GetKeyspaceResponse
+ (*vtctldata.GetKeyspacesResponse)(nil), // 112: vtctldata.GetKeyspacesResponse
+ (*vtctldata.GetPermissionsResponse)(nil), // 113: vtctldata.GetPermissionsResponse
+ (*vtctldata.GetRoutingRulesResponse)(nil), // 114: vtctldata.GetRoutingRulesResponse
+ (*vtctldata.GetSchemaResponse)(nil), // 115: vtctldata.GetSchemaResponse
+ (*vtctldata.GetShardResponse)(nil), // 116: vtctldata.GetShardResponse
+ (*vtctldata.GetShardRoutingRulesResponse)(nil), // 117: vtctldata.GetShardRoutingRulesResponse
+ (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 118: vtctldata.GetSrvKeyspaceNamesResponse
+ (*vtctldata.GetSrvKeyspacesResponse)(nil), // 119: vtctldata.GetSrvKeyspacesResponse
+ (*vtctldata.GetSrvVSchemaResponse)(nil), // 120: vtctldata.GetSrvVSchemaResponse
+ (*vtctldata.GetSrvVSchemasResponse)(nil), // 121: vtctldata.GetSrvVSchemasResponse
+ (*vtctldata.GetTabletResponse)(nil), // 122: vtctldata.GetTabletResponse
+ (*vtctldata.GetTabletsResponse)(nil), // 123: vtctldata.GetTabletsResponse
+ (*vtctldata.GetTopologyPathResponse)(nil), // 124: vtctldata.GetTopologyPathResponse
+ (*vtctldata.GetVersionResponse)(nil), // 125: vtctldata.GetVersionResponse
+ (*vtctldata.GetVSchemaResponse)(nil), // 126: vtctldata.GetVSchemaResponse
+ (*vtctldata.GetWorkflowsResponse)(nil), // 127: vtctldata.GetWorkflowsResponse
+ (*vtctldata.InitShardPrimaryResponse)(nil), // 128: vtctldata.InitShardPrimaryResponse
+ (*vtctldata.PingTabletResponse)(nil), // 129: vtctldata.PingTabletResponse
+ (*vtctldata.PlannedReparentShardResponse)(nil), // 130: vtctldata.PlannedReparentShardResponse
+ (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 131: vtctldata.RebuildKeyspaceGraphResponse
+ (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 132: vtctldata.RebuildVSchemaGraphResponse
+ (*vtctldata.RefreshStateResponse)(nil), // 133: vtctldata.RefreshStateResponse
+ (*vtctldata.RefreshStateByShardResponse)(nil), // 134: vtctldata.RefreshStateByShardResponse
+ (*vtctldata.ReloadSchemaResponse)(nil), // 135: vtctldata.ReloadSchemaResponse
+ (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 136: vtctldata.ReloadSchemaKeyspaceResponse
+ (*vtctldata.ReloadSchemaShardResponse)(nil), // 137: vtctldata.ReloadSchemaShardResponse
+ (*vtctldata.RemoveBackupResponse)(nil), // 138: vtctldata.RemoveBackupResponse
+ (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 139: vtctldata.RemoveKeyspaceCellResponse
+ (*vtctldata.RemoveShardCellResponse)(nil), // 140: vtctldata.RemoveShardCellResponse
+ (*vtctldata.ReparentTabletResponse)(nil), // 141: vtctldata.ReparentTabletResponse
+ (*vtctldata.RestoreFromBackupResponse)(nil), // 142: vtctldata.RestoreFromBackupResponse
+ (*vtctldata.RunHealthCheckResponse)(nil), // 143: vtctldata.RunHealthCheckResponse
+ (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 144: vtctldata.SetKeyspaceDurabilityPolicyResponse
+ (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 145: vtctldata.SetShardIsPrimaryServingResponse
+ (*vtctldata.SetShardTabletControlResponse)(nil), // 146: vtctldata.SetShardTabletControlResponse
+ (*vtctldata.SetWritableResponse)(nil), // 147: vtctldata.SetWritableResponse
+ (*vtctldata.ShardReplicationAddResponse)(nil), // 148: vtctldata.ShardReplicationAddResponse
+ (*vtctldata.ShardReplicationFixResponse)(nil), // 149: vtctldata.ShardReplicationFixResponse
+ (*vtctldata.ShardReplicationPositionsResponse)(nil), // 150: vtctldata.ShardReplicationPositionsResponse
+ (*vtctldata.ShardReplicationRemoveResponse)(nil), // 151: vtctldata.ShardReplicationRemoveResponse
+ (*vtctldata.SleepTabletResponse)(nil), // 152: vtctldata.SleepTabletResponse
+ (*vtctldata.SourceShardAddResponse)(nil), // 153: vtctldata.SourceShardAddResponse
+ (*vtctldata.SourceShardDeleteResponse)(nil), // 154: vtctldata.SourceShardDeleteResponse
+ (*vtctldata.StartReplicationResponse)(nil), // 155: vtctldata.StartReplicationResponse
+ (*vtctldata.StopReplicationResponse)(nil), // 156: vtctldata.StopReplicationResponse
+ (*vtctldata.TabletExternallyReparentedResponse)(nil), // 157: vtctldata.TabletExternallyReparentedResponse
+ (*vtctldata.UpdateCellInfoResponse)(nil), // 158: vtctldata.UpdateCellInfoResponse
+ (*vtctldata.UpdateCellsAliasResponse)(nil), // 159: vtctldata.UpdateCellsAliasResponse
+ (*vtctldata.ValidateResponse)(nil), // 160: vtctldata.ValidateResponse
+ (*vtctldata.ValidateKeyspaceResponse)(nil), // 161: vtctldata.ValidateKeyspaceResponse
+ (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 162: vtctldata.ValidateSchemaKeyspaceResponse
+ (*vtctldata.ValidateShardResponse)(nil), // 163: vtctldata.ValidateShardResponse
+ (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 164: vtctldata.ValidateVersionKeyspaceResponse
+ (*vtctldata.ValidateVersionShardResponse)(nil), // 165: vtctldata.ValidateVersionShardResponse
+ (*vtctldata.ValidateVSchemaResponse)(nil), // 166: vtctldata.ValidateVSchemaResponse
}
var file_vtctlservice_proto_depIdxs = []int32{
0, // 0: vtctlservice.Vtctl.ExecuteVtctlCommand:input_type -> vtctldata.ExecuteVtctlCommandRequest
@@ -734,131 +751,135 @@ var file_vtctlservice_proto_depIdxs = []int32{
38, // 38: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest
39, // 39: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest
40, // 40: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest
- 41, // 41: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest
- 42, // 42: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest
- 43, // 43: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest
- 44, // 44: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest
- 45, // 45: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest
- 46, // 46: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest
- 47, // 47: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest
- 48, // 48: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest
- 49, // 49: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest
- 50, // 50: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest
- 51, // 51: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest
- 52, // 52: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest
- 53, // 53: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest
- 54, // 54: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest
- 55, // 55: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest
- 56, // 56: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest
- 57, // 57: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest
- 58, // 58: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest
- 59, // 59: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest
- 60, // 60: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest
- 61, // 61: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest
- 62, // 62: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest
- 63, // 63: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest
- 64, // 64: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest
- 65, // 65: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest
- 66, // 66: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest
- 67, // 67: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest
- 68, // 68: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest
- 69, // 69: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest
- 70, // 70: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest
- 71, // 71: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest
- 72, // 72: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest
- 73, // 73: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest
- 74, // 74: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest
- 75, // 75: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest
- 76, // 76: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest
- 77, // 77: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest
- 78, // 78: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest
- 79, // 79: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest
- 80, // 80: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest
- 81, // 81: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest
- 82, // 82: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse
- 83, // 83: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse
- 84, // 84: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse
- 85, // 85: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse
- 86, // 86: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse
- 87, // 87: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse
- 88, // 88: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse
- 89, // 89: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse
- 89, // 90: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse
- 90, // 91: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse
- 91, // 92: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse
- 92, // 93: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse
- 93, // 94: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse
- 94, // 95: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse
- 95, // 96: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
- 96, // 97: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
- 97, // 98: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse
- 98, // 99: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse
- 99, // 100: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse
- 100, // 101: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse
- 101, // 102: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse
- 102, // 103: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse
- 103, // 104: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse
- 104, // 105: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse
- 105, // 106: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse
- 106, // 107: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse
- 107, // 108: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse
- 108, // 109: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse
- 109, // 110: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse
- 110, // 111: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse
- 111, // 112: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse
- 112, // 113: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse
- 113, // 114: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse
- 114, // 115: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse
- 115, // 116: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse
- 116, // 117: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse
- 117, // 118: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse
- 118, // 119: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse
- 119, // 120: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse
- 120, // 121: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse
- 121, // 122: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse
- 122, // 123: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse
- 123, // 124: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse
- 124, // 125: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse
- 125, // 126: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse
- 126, // 127: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse
- 127, // 128: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse
- 128, // 129: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse
- 129, // 130: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse
- 130, // 131: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse
- 131, // 132: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse
- 132, // 133: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse
- 133, // 134: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse
- 134, // 135: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse
- 135, // 136: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse
- 136, // 137: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse
- 137, // 138: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse
- 138, // 139: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse
- 139, // 140: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse
- 140, // 141: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse
- 141, // 142: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse
- 142, // 143: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse
- 143, // 144: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse
- 144, // 145: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse
- 145, // 146: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse
- 146, // 147: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse
- 147, // 148: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse
- 148, // 149: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse
- 149, // 150: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse
- 150, // 151: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse
- 151, // 152: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse
- 152, // 153: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse
- 153, // 154: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse
- 154, // 155: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse
- 155, // 156: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse
- 156, // 157: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse
- 157, // 158: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse
- 158, // 159: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
- 159, // 160: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
- 160, // 161: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse
- 161, // 162: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
- 162, // 163: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse
- 82, // [82:164] is the sub-list for method output_type
- 0, // [0:82] is the sub-list for method input_type
+ 41, // 41: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest
+ 42, // 42: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest
+ 43, // 43: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest
+ 44, // 44: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest
+ 45, // 45: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest
+ 46, // 46: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest
+ 47, // 47: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest
+ 48, // 48: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest
+ 49, // 49: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest
+ 50, // 50: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest
+ 51, // 51: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest
+ 52, // 52: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest
+ 53, // 53: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest
+ 54, // 54: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest
+ 55, // 55: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest
+ 56, // 56: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest
+ 57, // 57: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest
+ 58, // 58: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest
+ 59, // 59: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest
+ 60, // 60: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest
+ 61, // 61: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest
+ 62, // 62: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest
+ 63, // 63: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest
+ 64, // 64: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest
+ 65, // 65: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest
+ 66, // 66: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest
+ 67, // 67: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest
+ 68, // 68: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest
+ 69, // 69: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest
+ 70, // 70: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest
+ 71, // 71: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest
+ 72, // 72: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest
+ 73, // 73: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest
+ 74, // 74: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest
+ 75, // 75: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest
+ 76, // 76: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest
+ 77, // 77: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest
+ 78, // 78: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest
+ 79, // 79: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest
+ 80, // 80: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest
+ 81, // 81: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest
+ 82, // 82: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest
+ 83, // 83: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest
+ 84, // 84: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse
+ 85, // 85: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse
+ 86, // 86: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse
+ 87, // 87: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse
+ 88, // 88: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse
+ 89, // 89: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse
+ 90, // 90: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse
+ 91, // 91: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse
+ 91, // 92: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse
+ 92, // 93: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse
+ 93, // 94: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse
+ 94, // 95: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse
+ 95, // 96: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse
+ 96, // 97: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse
+ 97, // 98: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse
+ 98, // 99: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse
+ 99, // 100: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse
+ 100, // 101: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse
+ 101, // 102: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse
+ 102, // 103: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse
+ 103, // 104: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse
+ 104, // 105: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse
+ 105, // 106: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse
+ 106, // 107: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse
+ 107, // 108: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse
+ 108, // 109: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse
+ 109, // 110: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse
+ 110, // 111: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse
+ 111, // 112: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse
+ 112, // 113: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse
+ 113, // 114: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse
+ 114, // 115: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse
+ 115, // 116: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse
+ 116, // 117: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse
+ 117, // 118: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse
+ 118, // 119: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse
+ 119, // 120: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse
+ 120, // 121: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse
+ 121, // 122: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse
+ 122, // 123: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse
+ 123, // 124: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse
+ 124, // 125: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse
+ 125, // 126: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse
+ 126, // 127: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse
+ 127, // 128: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse
+ 128, // 129: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse
+ 129, // 130: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse
+ 130, // 131: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse
+ 131, // 132: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse
+ 132, // 133: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse
+ 133, // 134: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse
+ 134, // 135: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse
+ 135, // 136: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse
+ 136, // 137: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse
+ 137, // 138: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse
+ 138, // 139: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse
+ 139, // 140: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse
+ 140, // 141: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse
+ 141, // 142: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse
+ 142, // 143: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse
+ 143, // 144: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse
+ 144, // 145: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse
+ 145, // 146: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse
+ 146, // 147: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse
+ 147, // 148: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse
+ 148, // 149: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse
+ 149, // 150: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse
+ 150, // 151: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse
+ 151, // 152: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse
+ 152, // 153: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse
+ 153, // 154: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse
+ 154, // 155: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse
+ 155, // 156: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse
+ 156, // 157: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse
+ 157, // 158: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse
+ 158, // 159: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse
+ 159, // 160: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse
+ 160, // 161: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse
+ 161, // 162: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse
+ 162, // 163: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse
+ 163, // 164: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse
+ 164, // 165: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse
+ 165, // 166: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse
+ 166, // 167: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse
+ 84, // [84:168] is the sub-list for method output_type
+ 0, // [0:84] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
diff --git a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go
index 8173b8c5ed0..0e17723a363 100644
--- a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go
+++ b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go
@@ -244,6 +244,8 @@ type VtctldClient interface {
GetTablet(ctx context.Context, in *vtctldata.GetTabletRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletResponse, error)
// GetTablets returns tablets, optionally filtered by keyspace and shard.
GetTablets(ctx context.Context, in *vtctldata.GetTabletsRequest, opts ...grpc.CallOption) (*vtctldata.GetTabletsResponse, error)
+ // GetTopologyPath returns the topology cell at a given path.
+ GetTopologyPath(ctx context.Context, in *vtctldata.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error)
// GetVersion returns the version of a tablet from its debug vars.
GetVersion(ctx context.Context, in *vtctldata.GetVersionRequest, opts ...grpc.CallOption) (*vtctldata.GetVersionResponse, error)
// GetVSchema returns the vschema for a keyspace.
@@ -390,6 +392,8 @@ type VtctldClient interface {
ValidateShard(ctx context.Context, in *vtctldata.ValidateShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(ctx context.Context, in *vtctldata.ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(ctx context.Context, in *vtctldata.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error)
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences.
ValidateVSchema(ctx context.Context, in *vtctldata.ValidateVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVSchemaResponse, error)
}
@@ -808,6 +812,15 @@ func (c *vtctldClient) GetTablets(ctx context.Context, in *vtctldata.GetTabletsR
return out, nil
}
+func (c *vtctldClient) GetTopologyPath(ctx context.Context, in *vtctldata.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldata.GetTopologyPathResponse, error) {
+ out := new(vtctldata.GetTopologyPathResponse)
+ err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetTopologyPath", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vtctldClient) GetVersion(ctx context.Context, in *vtctldata.GetVersionRequest, opts ...grpc.CallOption) (*vtctldata.GetVersionResponse, error) {
out := new(vtctldata.GetVersionResponse)
err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetVersion", in, out, opts...)
@@ -1191,6 +1204,15 @@ func (c *vtctldClient) ValidateVersionKeyspace(ctx context.Context, in *vtctldat
return out, nil
}
+func (c *vtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldata.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error) {
+ out := new(vtctldata.ValidateVersionShardResponse)
+ err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ValidateVersionShard", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *vtctldClient) ValidateVSchema(ctx context.Context, in *vtctldata.ValidateVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVSchemaResponse, error) {
out := new(vtctldata.ValidateVSchemaResponse)
err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ValidateVSchema", in, out, opts...)
@@ -1312,6 +1334,8 @@ type VtctldServer interface {
GetTablet(context.Context, *vtctldata.GetTabletRequest) (*vtctldata.GetTabletResponse, error)
// GetTablets returns tablets, optionally filtered by keyspace and shard.
GetTablets(context.Context, *vtctldata.GetTabletsRequest) (*vtctldata.GetTabletsResponse, error)
+ // GetTopologyPath returns the topology cell at a given path.
+ GetTopologyPath(context.Context, *vtctldata.GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error)
// GetVersion returns the version of a tablet from its debug vars.
GetVersion(context.Context, *vtctldata.GetVersionRequest) (*vtctldata.GetVersionResponse, error)
// GetVSchema returns the vschema for a keyspace.
@@ -1458,6 +1482,8 @@ type VtctldServer interface {
ValidateShard(context.Context, *vtctldata.ValidateShardRequest) (*vtctldata.ValidateShardResponse, error)
// ValidateVersionKeyspace validates that the version on the primary of shard 0 matches all of the other tablets in the keyspace.
ValidateVersionKeyspace(context.Context, *vtctldata.ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error)
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ ValidateVersionShard(context.Context, *vtctldata.ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error)
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences.
ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error)
mustEmbedUnimplementedVtctldServer()
@@ -1587,6 +1613,9 @@ func (UnimplementedVtctldServer) GetTablet(context.Context, *vtctldata.GetTablet
func (UnimplementedVtctldServer) GetTablets(context.Context, *vtctldata.GetTabletsRequest) (*vtctldata.GetTabletsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTablets not implemented")
}
+func (UnimplementedVtctldServer) GetTopologyPath(context.Context, *vtctldata.GetTopologyPathRequest) (*vtctldata.GetTopologyPathResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetTopologyPath not implemented")
+}
func (UnimplementedVtctldServer) GetVersion(context.Context, *vtctldata.GetVersionRequest) (*vtctldata.GetVersionResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented")
}
@@ -1707,6 +1736,9 @@ func (UnimplementedVtctldServer) ValidateShard(context.Context, *vtctldata.Valid
func (UnimplementedVtctldServer) ValidateVersionKeyspace(context.Context, *vtctldata.ValidateVersionKeyspaceRequest) (*vtctldata.ValidateVersionKeyspaceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionKeyspace not implemented")
}
+func (UnimplementedVtctldServer) ValidateVersionShard(context.Context, *vtctldata.ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ValidateVersionShard not implemented")
+}
func (UnimplementedVtctldServer) ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ValidateVSchema not implemented")
}
@@ -2449,6 +2481,24 @@ func _Vtctld_GetTablets_Handler(srv interface{}, ctx context.Context, dec func(i
return interceptor(ctx, in, info, handler)
}
+func _Vtctld_GetTopologyPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(vtctldata.GetTopologyPathRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VtctldServer).GetTopologyPath(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtctlservice.Vtctld/GetTopologyPath",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VtctldServer).GetTopologyPath(ctx, req.(*vtctldata.GetTopologyPathRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Vtctld_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(vtctldata.GetVersionRequest)
if err := dec(in); err != nil {
@@ -3172,6 +3222,24 @@ func _Vtctld_ValidateVersionKeyspace_Handler(srv interface{}, ctx context.Contex
return interceptor(ctx, in, info, handler)
}
+func _Vtctld_ValidateVersionShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(vtctldata.ValidateVersionShardRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VtctldServer).ValidateVersionShard(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/vtctlservice.Vtctld/ValidateVersionShard",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VtctldServer).ValidateVersionShard(ctx, req.(*vtctldata.ValidateVersionShardRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Vtctld_ValidateVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(vtctldata.ValidateVSchemaRequest)
if err := dec(in); err != nil {
@@ -3349,6 +3417,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetTablets",
Handler: _Vtctld_GetTablets_Handler,
},
+ {
+ MethodName: "GetTopologyPath",
+ Handler: _Vtctld_GetTopologyPath_Handler,
+ },
{
MethodName: "GetVersion",
Handler: _Vtctld_GetVersion_Handler,
@@ -3505,6 +3577,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{
MethodName: "ValidateVersionKeyspace",
Handler: _Vtctld_ValidateVersionKeyspace_Handler,
},
+ {
+ MethodName: "ValidateVersionShard",
+ Handler: _Vtctld_ValidateVersionShard_Handler,
+ },
{
MethodName: "ValidateVSchema",
Handler: _Vtctld_ValidateVSchema_Handler,
diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go
index 11914d8f3fb..fdc959e903c 100644
--- a/go/vt/proto/vtgate/vtgate.pb.go
+++ b/go/vt/proto/vtgate/vtgate.pb.go
@@ -988,7 +988,9 @@ type VStreamFlags struct {
StopOnReshard bool `protobuf:"varint,3,opt,name=stop_on_reshard,json=stopOnReshard,proto3" json:"stop_on_reshard,omitempty"`
// if specified, these cells (comma-separated) are used to pick source tablets from.
// defaults to the cell of the vtgate serving the VStream API.
- Cells string `protobuf:"bytes,4,opt,name=cells,proto3" json:"cells,omitempty"`
+ Cells string `protobuf:"bytes,4,opt,name=cells,proto3" json:"cells,omitempty"`
+ CellPreference string `protobuf:"bytes,5,opt,name=cell_preference,json=cellPreference,proto3" json:"cell_preference,omitempty"`
+ TabletOrder string `protobuf:"bytes,6,opt,name=tablet_order,json=tabletOrder,proto3" json:"tablet_order,omitempty"`
}
func (x *VStreamFlags) Reset() {
@@ -1051,6 +1053,20 @@ func (x *VStreamFlags) GetCells() string {
return ""
}
+func (x *VStreamFlags) GetCellPreference() string {
+ if x != nil {
+ return x.CellPreference
+ }
+ return ""
+}
+
+func (x *VStreamFlags) GetTabletOrder() string {
+ if x != nil {
+ return x.TabletOrder
+ }
+ return ""
+}
+
// VStreamRequest is the payload for VStream.
type VStreamRequest struct {
state protoimpl.MessageState
@@ -1696,7 +1712,7 @@ var file_vtgate_proto_rawDesc = []byte{
0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74,
0x69, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61,
0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67,
+ 0x22, 0xec, 0x01, 0x0a, 0x0c, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67,
0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x73, 0x6b,
0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69,
0x7a, 0x65, 0x53, 0x6b, 0x65, 0x77, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62,
@@ -1706,67 +1722,72 @@ var file_vtgate_proto_rawDesc = []byte{
0x5f, 0x72, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d,
0x73, 0x74, 0x6f, 0x70, 0x4f, 0x6e, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a,
0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65,
- 0x6c, 0x6c, 0x73, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70,
- 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76,
- 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e,
- 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76,
- 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
- 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0f,
- 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x0e,
- 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c,
- 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07,
- 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07,
- 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42,
- 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x6e, 0x0a, 0x13,
- 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
- 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x0a, 0x14,
- 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2a, 0x44, 0x0a, 0x0f, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f,
- 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d,
- 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x57, 0x4f, 0x50, 0x43, 0x10,
- 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72,
- 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03,
- 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12,
- 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x42,
- 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69,
- 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6c, 0x6c, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x65, 0x66,
+ 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x65,
+ 0x6c, 0x6c, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22,
+ 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
+ 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64,
+ 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x05,
+ 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x74,
+ 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67,
+ 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69,
+ 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52,
+ 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70,
+ 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08,
+ 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61,
+ 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a,
+ 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74,
+ 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x73,
+ 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a,
+ 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52,
+ 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x6c, 0x6f, 0x73,
+ 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2a, 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53,
+ 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x4c, 0x54, 0x49,
+ 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x57, 0x4f, 0x50, 0x43, 0x10, 0x03, 0x2a, 0x3c, 0x0a,
+ 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06,
+ 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x52, 0x45, 0x10,
+ 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41,
+ 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x42, 0x36, 0x0a, 0x0f, 0x69,
+ 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x23,
+ 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x67,
+ 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/proto/vtgate/vtgate_vtproto.pb.go b/go/vt/proto/vtgate/vtgate_vtproto.pb.go
index 8f05e8d01ab..1ddccaaf169 100644
--- a/go/vt/proto/vtgate/vtgate_vtproto.pb.go
+++ b/go/vt/proto/vtgate/vtgate_vtproto.pb.go
@@ -910,6 +910,20 @@ func (m *VStreamFlags) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if len(m.TabletOrder) > 0 {
+ i -= len(m.TabletOrder)
+ copy(dAtA[i:], m.TabletOrder)
+ i = encodeVarint(dAtA, i, uint64(len(m.TabletOrder)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.CellPreference) > 0 {
+ i -= len(m.CellPreference)
+ copy(dAtA[i:], m.CellPreference)
+ i = encodeVarint(dAtA, i, uint64(len(m.CellPreference)))
+ i--
+ dAtA[i] = 0x2a
+ }
if len(m.Cells) > 0 {
i -= len(m.Cells)
copy(dAtA[i:], m.Cells)
@@ -1667,6 +1681,14 @@ func (m *VStreamFlags) SizeVT() (n int) {
if l > 0 {
n += 1 + l + sov(uint64(l))
}
+ l = len(m.CellPreference)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
+ l = len(m.TabletOrder)
+ if l > 0 {
+ n += 1 + l + sov(uint64(l))
+ }
if m.unknownFields != nil {
n += len(m.unknownFields)
}
@@ -4244,6 +4266,70 @@ func (m *VStreamFlags) UnmarshalVT(dAtA []byte) error {
}
m.Cells = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CellPreference", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CellPreference = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TabletOrder", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLength
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TabletOrder = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go
index da29005747c..1988d2b7561 100644
--- a/go/vt/proto/vtrpc/vtrpc.pb.go
+++ b/go/vt/proto/vtrpc/vtrpc.pb.go
@@ -39,7 +39,9 @@ const (
// Code represents canonical error codes. The names, numbers and comments
// must match the ones defined by grpc (0-16):
-// https://godoc.org/google.golang.org/grpc/codes.
+//
+// https://godoc.org/google.golang.org/grpc/codes.
+//
// 17+ are custom codes
type Code int32
@@ -88,18 +90,19 @@ const (
//
// A litmus test that may help a service implementor in deciding
// between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
- // (a) Use UNAVAILABLE if the client can retry just the failing call.
- // (b) Use ABORTED if the client should retry at a higher-level
- // (e.g., restarting a read-modify-write sequence).
- // (c) Use FAILED_PRECONDITION if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
- // fails because the directory is non-empty, FAILED_PRECONDITION
- // should be returned since the client should not retry unless
- // they have first fixed up the directory by deleting files from it.
- // (d) Use FAILED_PRECONDITION if the client performs conditional
- // REST Get/Update/Delete on a resource and the resource on the
- // server does not match the condition. E.g., conflicting
- // read-modify-write on the same resource.
+ //
+ // (a) Use UNAVAILABLE if the client can retry just the failing call.
+ // (b) Use ABORTED if the client should retry at a higher-level
+ // (e.g., restarting a read-modify-write sequence).
+ // (c) Use FAILED_PRECONDITION if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, FAILED_PRECONDITION
+ // should be returned since the client should not retry unless
+ // they have first fixed up the directory by deleting files from it.
+ // (d) Use FAILED_PRECONDITION if the client performs conditional
+ // REST Get/Update/Delete on a resource and the resource on the
+ // server does not match the condition. E.g., conflicting
+ // read-modify-write on the same resource.
Code_FAILED_PRECONDITION Code = 9
// ABORTED indicates the operation was aborted, typically due to a
// concurrency issue like sequencer check failures, transaction aborts,
diff --git a/go/vt/schema/ddl_strategy.go b/go/vt/schema/ddl_strategy.go
index 05e2e15c1f5..26d1878c95c 100644
--- a/go/vt/schema/ddl_strategy.go
+++ b/go/vt/schema/ddl_strategy.go
@@ -19,12 +19,15 @@ package schema
import (
"fmt"
"regexp"
+ "strconv"
+ "time"
"github.com/google/shlex"
)
var (
- strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`)
+ strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`)
+ retainArtifactsFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, retainArtifactsFlag))
)
const (
@@ -38,6 +41,7 @@ const (
allowConcurrentFlag = "allow-concurrent"
fastOverRevertibleFlag = "fast-over-revertible"
fastRangeRotationFlag = "fast-range-rotation"
+ retainArtifactsFlag = "retain-artifacts"
vreplicationTestSuite = "vreplication-test-suite"
)
@@ -98,6 +102,9 @@ func ParseDDLStrategy(strategyVariable string) (*DDLStrategySetting, error) {
default:
return nil, fmt.Errorf("Unknown online DDL strategy: '%v'", strategy)
}
+ if _, err := setting.RetainArtifactsDuration(); err != nil {
+ return nil, err
+ }
return setting, nil
}
@@ -168,7 +175,34 @@ func (setting *DDLStrategySetting) IsFastRangeRotationFlag() bool {
return setting.hasFlag(fastRangeRotationFlag)
}
-// IsVreplicationTestSuite checks if strategy options include -vreplicatoin-test-suite
+// isRetainArtifactsFlag returns true when given option denotes a `--retain-artifacts=[...]` flag
+func isRetainArtifactsFlag(opt string) (string, bool) {
+ submatch := retainArtifactsFlagRegexp.FindStringSubmatch(opt)
+ if len(submatch) == 0 {
+ return "", false
+ }
+ return submatch[1], true
+}
+
+// RetainArtifactsDuration returns a the duration indicated by --retain-artifacts
+func (setting *DDLStrategySetting) RetainArtifactsDuration() (d time.Duration, err error) {
+ // We do some ugly manual parsing of --retain-artifacts
+ opts, _ := shlex.Split(setting.Options)
+ for _, opt := range opts {
+ if val, isRetainArtifacts := isRetainArtifactsFlag(opt); isRetainArtifacts {
+ // value is possibly quoted
+ if s, err := strconv.Unquote(val); err == nil {
+ val = s
+ }
+ if val != "" {
+ d, err = time.ParseDuration(val)
+ }
+ }
+ }
+ return d, err
+}
+
+// IsVreplicationTestSuite checks if strategy options include --vreplicatoin-test-suite
func (setting *DDLStrategySetting) IsVreplicationTestSuite() bool {
return setting.hasFlag(vreplicationTestSuite)
}
@@ -178,6 +212,9 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string {
opts, _ := shlex.Split(setting.Options)
validOpts := []string{}
for _, opt := range opts {
+ if _, ok := isRetainArtifactsFlag(opt); ok {
+ continue
+ }
switch {
case isFlag(opt, declarativeFlag):
case isFlag(opt, skipTopoFlag):
diff --git a/go/vt/schema/ddl_strategy_test.go b/go/vt/schema/ddl_strategy_test.go
index 8a700655e51..d8723ee2c57 100644
--- a/go/vt/schema/ddl_strategy_test.go
+++ b/go/vt/schema/ddl_strategy_test.go
@@ -19,6 +19,7 @@ package schema
import (
"strings"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
)
@@ -37,6 +38,69 @@ func TestIsDirect(t *testing.T) {
assert.True(t, DDLStrategy("something").IsDirect())
}
+func TestIsExpireArtifactsFlag(t *testing.T) {
+ tt := []struct {
+ s string
+ expect bool
+ val string
+ d time.Duration
+ }{
+ {
+ s: "something",
+ },
+ {
+ s: "-retain-artifacts",
+ },
+ {
+ s: "--retain-artifacts",
+ },
+ {
+ s: "--retain-artifacts=",
+ expect: true,
+ },
+ {
+ s: "--retain-artifacts=0",
+ expect: true,
+ val: "0",
+ d: 0,
+ },
+ {
+ s: "-retain-artifacts=0",
+ expect: true,
+ val: "0",
+ d: 0,
+ },
+ {
+ s: "--retain-artifacts=1m",
+ expect: true,
+ val: "1m",
+ d: time.Minute,
+ },
+ {
+ s: `--retain-artifacts="1m"`,
+ expect: true,
+ val: `"1m"`,
+ d: time.Minute,
+ },
+ }
+ for _, ts := range tt {
+ t.Run(ts.s, func(t *testing.T) {
+ setting, err := ParseDDLStrategy("online " + ts.s)
+ assert.NoError(t, err)
+
+ val, isRetainArtifacts := isRetainArtifactsFlag(ts.s)
+ assert.Equal(t, ts.expect, isRetainArtifacts)
+ assert.Equal(t, ts.val, val)
+
+ if ts.expect {
+ d, err := setting.RetainArtifactsDuration()
+ assert.NoError(t, err)
+ assert.Equal(t, ts.d, d)
+ }
+ })
+ }
+}
+
func TestParseDDLStrategy(t *testing.T) {
tt := []struct {
strategyVariable string
@@ -49,6 +113,7 @@ func TestParseDDLStrategy(t *testing.T) {
isAllowConcurrent bool
fastOverRevertible bool
fastRangeRotation bool
+ expireArtifacts time.Duration
runtimeOptions string
err error
}{
@@ -145,6 +210,13 @@ func TestParseDDLStrategy(t *testing.T) {
runtimeOptions: "",
fastRangeRotation: true,
},
+ {
+ strategyVariable: "vitess --retain-artifacts=4m",
+ strategy: DDLStrategyVitess,
+ options: "--retain-artifacts=4m",
+ runtimeOptions: "",
+ expireArtifacts: 4 * time.Minute,
+ },
}
for _, ts := range tt {
setting, err := ParseDDLStrategy(ts.strategyVariable)
@@ -166,4 +238,8 @@ func TestParseDDLStrategy(t *testing.T) {
_, err := ParseDDLStrategy("other")
assert.Error(t, err)
}
+ {
+ _, err := ParseDDLStrategy("online --retain-artifacts=3")
+ assert.Error(t, err)
+ }
}
diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go
index 07740039004..aff15a14f89 100644
--- a/go/vt/schema/online_ddl.go
+++ b/go/vt/schema/online_ddl.go
@@ -269,6 +269,11 @@ func OnlineDDLFromCommentedStatement(stmt sqlparser.Statement) (onlineDDL *Onlin
default:
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported statement for Online DDL: %v", sqlparser.String(stmt))
}
+ // We clone the comments because they will end up being cached by the query planner. Then, the Directive() function actually modifies the comments.
+ // If comments are shared in cache, and Directive() modifies it, then we have a concurrency issue when someone else wants to read the comments.
+ // By cloning the comments we remove the concurrency problem.
+ comments = sqlparser.CloneRefOfParsedComments(comments)
+ comments.ResetDirectives()
if comments.Length() == 0 {
return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no comments found in statement: %v", sqlparser.String(stmt))
diff --git a/go/vt/schemamanager/schemamanager.go b/go/vt/schemamanager/schemamanager.go
index 1ccc52d6a6d..e0063e48357 100644
--- a/go/vt/schemamanager/schemamanager.go
+++ b/go/vt/schemamanager/schemamanager.go
@@ -17,12 +17,11 @@ limitations under the License.
package schemamanager
import (
+ "context"
"encoding/json"
"fmt"
"time"
- "context"
-
"vitess.io/vitess/go/vt/log"
querypb "vitess.io/vitess/go/vt/proto/query"
)
diff --git a/go/vt/servenv/buildinfo.go b/go/vt/servenv/buildinfo.go
index c03c85009ed..d00b4f063ff 100644
--- a/go/vt/servenv/buildinfo.go
+++ b/go/vt/servenv/buildinfo.go
@@ -40,7 +40,7 @@ var (
)
func registerVersionFlag(fs *pflag.FlagSet) {
- fs.BoolVar(&version, "version", version, "print binary version")
+ fs.BoolVarP(&version, "version", "v", version, "print binary version")
}
// AppVersion is the struct to store build info.
diff --git a/go/vt/servenv/buildinfo_test.go b/go/vt/servenv/buildinfo_test.go
index 15b2bd4ec80..8d354448077 100644
--- a/go/vt/servenv/buildinfo_test.go
+++ b/go/vt/servenv/buildinfo_test.go
@@ -33,17 +33,17 @@ func TestVersionString(t *testing.T) {
buildTimePretty: "time is now",
buildGitRev: "d54b87ca0be09b678bb4490060e8f23f890ddb92",
buildGitBranch: "gitBranch",
- goVersion: "1.18.5",
+ goVersion: "1.18.9",
goOS: "amiga",
goArch: "amd64",
version: "v1.2.3-SNAPSHOT",
}
- assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.18.5 amiga/amd64", v.String())
+ assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.18.9 amiga/amd64", v.String())
v.jenkinsBuildNumber = 422
- assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.18.5 amiga/amd64", v.String())
+ assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.18.9 amiga/amd64", v.String())
assert.Equal(t, "5.7.9-vitess-v1.2.3-SNAPSHOT", v.MySQLVersion())
}
diff --git a/go/vt/servenv/exporter.go b/go/vt/servenv/exporter.go
index 397be415581..d8eb4ef428d 100644
--- a/go/vt/servenv/exporter.go
+++ b/go/vt/servenv/exporter.go
@@ -102,6 +102,7 @@ type Exporter struct {
name, label string
handleFuncs map[string]*handleFunc
sp *statusPage
+ mu sync.Mutex
}
// NewExporter creates a new Exporter with name as namespace.
@@ -154,6 +155,8 @@ func (e *Exporter) URLPrefix() string {
// url remapped from /path to /name/path. If name is empty, the request
// is passed through to http.HandleFunc.
func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http.Request)) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
if e.name == "" {
http.HandleFunc(url, f)
return
diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go
index 97749bf7efc..1eff867a104 100644
--- a/go/vt/servenv/grpc_server.go
+++ b/go/vt/servenv/grpc_server.go
@@ -207,10 +207,12 @@ func createGRPCServer() {
// grpc: received message length XXXXXXX exceeding the max size 4194304
// Note: For gRPC 1.0.0 it's sufficient to set the limit on the server only
// because it's not enforced on the client side.
- msgSize := grpccommon.MaxMessageSize()
- log.Infof("Setting grpc max message size to %d", msgSize)
- opts = append(opts, grpc.MaxRecvMsgSize(msgSize))
- opts = append(opts, grpc.MaxSendMsgSize(msgSize))
+
+ maxSendSize := grpccommon.MaxMessageSendSize()
+ maxRecvSize := grpccommon.MaxMessageRecvSize()
+ log.Infof("Setting grpc server max message sizes to %d (sending), %d (receiving)", maxSendSize, maxRecvSize)
+ opts = append(opts, grpc.MaxRecvMsgSize(maxRecvSize))
+ opts = append(opts, grpc.MaxSendMsgSize(maxSendSize))
if gRPCInitialConnWindowSize != 0 {
log.Infof("Setting grpc server initial conn window size to %d", int32(gRPCInitialConnWindowSize))
diff --git a/go/vt/servenv/mysql.go b/go/vt/servenv/mysql.go
index e74d4d07acf..e5d37bd5391 100644
--- a/go/vt/servenv/mysql.go
+++ b/go/vt/servenv/mysql.go
@@ -51,7 +51,6 @@ func init() {
"vtbackup",
"vtcombo",
"vtctl",
- "vtctld",
"vtctldclient",
"vtexplain",
"vtgate",
diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go
index 5c851b295ee..7509c777767 100644
--- a/go/vt/servenv/servenv.go
+++ b/go/vt/servenv/servenv.go
@@ -78,8 +78,9 @@ var (
var (
lameduckPeriod = 50 * time.Millisecond
onTermTimeout = 10 * time.Second
- onCloseTimeout = time.Nanosecond
+ onCloseTimeout = 10 * time.Second
catchSigpipe bool
+ usePSLogger bool
)
// RegisterFlags installs the flags used by Init, Run, and RunDefault.
@@ -95,6 +96,8 @@ func RegisterFlags() {
// pid_file.go
fs.StringVar(&pidFile, "pid_file", pidFile, "If set, the process will write its pid to the named file, and delete it on graceful shutdown.")
+ // Logging
+ fs.BoolVar(&usePSLogger, "structured-logging", usePSLogger, "whether to use structured logging (PlanetScale Log) logger or the original (glog) logger")
})
}
@@ -103,6 +106,11 @@ func Init() {
mu.Lock()
defer mu.Unlock()
+ // Uptime metric
+ _ = stats.NewGaugeFunc("Uptime", "Uptime in nanoseconds", func() int64 {
+ return int64(time.Since(serverStart).Nanoseconds())
+ })
+
// Ignore SIGPIPE if specified
// The Go runtime catches SIGPIPE for us on all fds except stdout/stderr
// See https://golang.org/pkg/os/signal/#hdr-SIGPIPE
@@ -314,11 +322,21 @@ func ParseFlags(cmd string) {
os.Exit(0)
}
+ if usePSLogger {
+ // Replace glog logger with PlanetScale logger
+ _, err := logutil.SetPlanetScaleLogger(nil)
+ if err != nil {
+ log.Exitf("error while setting the PlanetScale logger: %s", err)
+ }
+ }
+
args := fs.Args()
if len(args) > 0 {
_flag.Usage()
log.Exitf("%s doesn't take any positional arguments, got '%s'", cmd, strings.Join(args, " "))
}
+
+ logutil.PurgeLogs()
}
// GetFlagSetFor returns the flag set for a given command.
@@ -348,6 +366,8 @@ func ParseFlagsWithArgs(cmd string) []string {
log.Exitf("%s expected at least one positional argument", cmd)
}
+ logutil.PurgeLogs()
+
return args
}
@@ -385,6 +405,19 @@ func init() {
OnParseFor(cmd, grpccommon.RegisterFlags)
}
+ // These are the binaries that export stats
+ for _, cmd := range []string{
+ "vtbackup",
+ "vtcombo",
+ "vtctld",
+ "vtgate",
+ "vtgr",
+ "vttablet",
+ "vtorc",
+ } {
+ OnParseFor(cmd, stats.RegisterFlags)
+ }
+
// Flags in package log are installed for all binaries.
OnParse(log.RegisterFlags)
// Flags in package logutil are installed for all binaries.
diff --git a/go/vt/servenv/servenv_test.go b/go/vt/servenv/servenv_test.go
index 3d835fcea1a..b7bd874989a 100644
--- a/go/vt/servenv/servenv_test.go
+++ b/go/vt/servenv/servenv_test.go
@@ -65,9 +65,7 @@ func TestFireOnCloseHooksTimeout(t *testing.T) {
time.Sleep(1 * time.Second)
})
- // we deliberatly test the flag to make sure it's not accidently set to a
- // high value.
- if finished, want := fireOnCloseHooks(onCloseTimeout), false; finished != want {
+ if finished, want := fireOnCloseHooks(1*time.Nanosecond), false; finished != want {
t.Errorf("finished = %v, want %v", finished, want)
}
}
diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go
index 8585fdd55ee..caee080dc12 100644
--- a/go/vt/servenv/version.go
+++ b/go/vt/servenv/version.go
@@ -1,5 +1,5 @@
/*
-Copyright 2022 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES
-// DO NOT EDIT
-
package servenv
-const versionName = "15.0.0-SNAPSHOT"
+// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY ./tools/do_releases.sh
+// DO NOT EDIT
+
+const versionName = "15.0.5"
diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go
index 8ece2749788..afa130eb279 100644
--- a/go/vt/sqlparser/ast_funcs.go
+++ b/go/vt/sqlparser/ast_funcs.go
@@ -210,97 +210,101 @@ func (ct *ColumnType) DescribeType() string {
// SQLType returns the sqltypes type code for the given column
func (ct *ColumnType) SQLType() querypb.Type {
- switch strings.ToLower(ct.Type) {
- case keywordStrings[TINYINT]:
- if ct.Unsigned {
+ return SQLTypeToQueryType(ct.Type, ct.Unsigned)
+}
+
+func SQLTypeToQueryType(typeName string, unsigned bool) querypb.Type {
+ switch keywordVals[strings.ToLower(typeName)] {
+ case TINYINT:
+ if unsigned {
return sqltypes.Uint8
}
return sqltypes.Int8
- case keywordStrings[SMALLINT]:
- if ct.Unsigned {
+ case SMALLINT:
+ if unsigned {
return sqltypes.Uint16
}
return sqltypes.Int16
- case keywordStrings[MEDIUMINT]:
- if ct.Unsigned {
+ case MEDIUMINT:
+ if unsigned {
return sqltypes.Uint24
}
return sqltypes.Int24
- case keywordStrings[INT], keywordStrings[INTEGER]:
- if ct.Unsigned {
+ case INT, INTEGER:
+ if unsigned {
return sqltypes.Uint32
}
return sqltypes.Int32
- case keywordStrings[BIGINT]:
- if ct.Unsigned {
+ case BIGINT:
+ if unsigned {
return sqltypes.Uint64
}
return sqltypes.Int64
- case keywordStrings[BOOL], keywordStrings[BOOLEAN]:
+ case BOOL, BOOLEAN:
return sqltypes.Uint8
- case keywordStrings[TEXT]:
+ case TEXT:
return sqltypes.Text
- case keywordStrings[TINYTEXT]:
+ case TINYTEXT:
return sqltypes.Text
- case keywordStrings[MEDIUMTEXT]:
+ case MEDIUMTEXT:
return sqltypes.Text
- case keywordStrings[LONGTEXT]:
+ case LONGTEXT:
return sqltypes.Text
- case keywordStrings[BLOB]:
+ case BLOB:
return sqltypes.Blob
- case keywordStrings[TINYBLOB]:
+ case TINYBLOB:
return sqltypes.Blob
- case keywordStrings[MEDIUMBLOB]:
+ case MEDIUMBLOB:
return sqltypes.Blob
- case keywordStrings[LONGBLOB]:
+ case LONGBLOB:
return sqltypes.Blob
- case keywordStrings[CHAR]:
+ case CHAR:
return sqltypes.Char
- case keywordStrings[VARCHAR]:
+ case VARCHAR:
return sqltypes.VarChar
- case keywordStrings[BINARY]:
+ case BINARY:
return sqltypes.Binary
- case keywordStrings[VARBINARY]:
+ case VARBINARY:
return sqltypes.VarBinary
- case keywordStrings[DATE]:
+ case DATE:
return sqltypes.Date
- case keywordStrings[TIME]:
+ case TIME:
return sqltypes.Time
- case keywordStrings[DATETIME]:
+ case DATETIME:
return sqltypes.Datetime
- case keywordStrings[TIMESTAMP]:
+ case TIMESTAMP:
return sqltypes.Timestamp
- case keywordStrings[YEAR]:
+ case YEAR:
return sqltypes.Year
- case keywordStrings[FLOAT_TYPE]:
+ case FLOAT_TYPE:
return sqltypes.Float32
- case keywordStrings[DOUBLE]:
+ case DOUBLE:
return sqltypes.Float64
- case keywordStrings[DECIMAL]:
+ case DECIMAL, DECIMAL_TYPE:
return sqltypes.Decimal
- case keywordStrings[BIT]:
+ case BIT:
return sqltypes.Bit
- case keywordStrings[ENUM]:
+ case ENUM:
return sqltypes.Enum
- case keywordStrings[SET]:
+ case SET:
return sqltypes.Set
- case keywordStrings[JSON]:
+ case JSON:
return sqltypes.TypeJSON
- case keywordStrings[GEOMETRY]:
+ case GEOMETRY:
return sqltypes.Geometry
- case keywordStrings[POINT]:
+ case POINT:
return sqltypes.Geometry
- case keywordStrings[LINESTRING]:
+ case LINESTRING:
return sqltypes.Geometry
- case keywordStrings[POLYGON]:
+ case POLYGON:
return sqltypes.Geometry
- case keywordStrings[GEOMETRYCOLLECTION]:
+ case GEOMETRYCOLLECTION:
return sqltypes.Geometry
- case keywordStrings[MULTIPOINT]:
+ case MULTIPOINT:
return sqltypes.Geometry
- case keywordStrings[MULTILINESTRING]:
+ case MULTILINESTRING:
return sqltypes.Geometry
- case keywordStrings[MULTIPOLYGON]:
+ case MULTIPOLYGON:
return sqltypes.Geometry
}
return sqltypes.Null
@@ -987,10 +991,8 @@ func (node *Select) AddHaving(expr Expr) {
}
return
}
- node.Having.Expr = &AndExpr{
- Left: node.Having.Expr,
- Right: expr,
- }
+ exprs := SplitAndExpression(nil, node.Having.Expr)
+ node.Having.Expr = AndExpressions(append(exprs, expr)...)
}
// AddGroupBy adds a grouping expression, unless it's already present
diff --git a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go
index 5ecdea8896b..c60d2f6e3c2 100644
--- a/go/vt/sqlparser/ast_rewriting.go
+++ b/go/vt/sqlparser/ast_rewriting.go
@@ -496,13 +496,20 @@ var funcRewrites = map[string]string{
}
func (er *astRewriter) funcRewrite(cursor *Cursor, node *FuncExpr) {
- bindVar, found := funcRewrites[node.Name.Lowered()]
+ lowered := node.Name.Lowered()
+ if lowered == "last_insert_id" && len(node.Exprs) > 0 {
+ // if we are dealing with is LAST_INSERT_ID() with an argument, we don't need to rewrite it.
+ // with an argument, this is an identity function that will update the session state and
+ // sets the correct fields in the OK TCP packet that we send back
+ return
+ }
+ bindVar, found := funcRewrites[lowered]
if found {
if bindVar == DBVarName && !er.shouldRewriteDatabaseFunc {
return
}
if len(node.Exprs) > 0 {
- er.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Argument to %s() not supported", node.Name.Lowered())
+ er.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Argument to %s() not supported", lowered)
return
}
cursor.Replace(bindVarExpression(bindVar))
diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go
index 71c56594875..0d6841755c8 100644
--- a/go/vt/sqlparser/ast_test.go
+++ b/go/vt/sqlparser/ast_test.go
@@ -57,32 +57,22 @@ func TestSelect(t *testing.T) {
sel.AddWhere(expr)
buf := NewTrackedBuffer(nil)
sel.Where.Format(buf)
- want := " where a = 1"
- if buf.String() != want {
- t.Errorf("where: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " where a = 1", buf.String())
sel.AddWhere(expr)
buf = NewTrackedBuffer(nil)
sel.Where.Format(buf)
- want = " where a = 1"
- if buf.String() != want {
- t.Errorf("where: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " where a = 1", buf.String())
+
sel = &Select{}
sel.AddHaving(expr)
buf = NewTrackedBuffer(nil)
sel.Having.Format(buf)
- want = " having a = 1"
- if buf.String() != want {
- t.Errorf("having: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " having a = 1", buf.String())
+
sel.AddHaving(expr)
buf = NewTrackedBuffer(nil)
sel.Having.Format(buf)
- want = " having a = 1 and a = 1"
- if buf.String() != want {
- t.Errorf("having: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " having a = 1", buf.String())
tree, err = Parse("select * from t where a = 1 or b = 1")
require.NoError(t, err)
@@ -91,18 +81,14 @@ func TestSelect(t *testing.T) {
sel.AddWhere(expr)
buf = NewTrackedBuffer(nil)
sel.Where.Format(buf)
- want = " where a = 1 or b = 1"
- if buf.String() != want {
- t.Errorf("where: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " where a = 1 or b = 1", buf.String())
+
sel = &Select{}
sel.AddHaving(expr)
buf = NewTrackedBuffer(nil)
sel.Having.Format(buf)
- want = " having a = 1 or b = 1"
- if buf.String() != want {
- t.Errorf("having: %q, want %s", buf.String(), want)
- }
+ assert.Equal(t, " having a = 1 or b = 1", buf.String())
+
}
func TestUpdate(t *testing.T) {
@@ -835,3 +821,32 @@ func BenchmarkStringTraces(b *testing.B) {
})
}
}
+
+func TestCloneComments(t *testing.T) {
+ c := []string{"/*vt+ a=b */"}
+ parsedComments := Comments(c).Parsed()
+ directives := parsedComments.Directives()
+ {
+ assert.NotEmpty(t, directives.m)
+ val, ok := directives.m["a"]
+ assert.Truef(t, ok, "directives map: %v", directives.m)
+ assert.Equal(t, "b", val)
+ }
+ cloned := CloneRefOfParsedComments(parsedComments)
+ cloned.ResetDirectives()
+ clonedDirectives := cloned.Directives()
+ {
+ assert.NotEmpty(t, clonedDirectives.m)
+ val, ok := clonedDirectives.m["a"]
+ assert.Truef(t, ok, "directives map: %v", directives.m)
+ assert.Equal(t, "b", val)
+ }
+ {
+ delete(directives.m, "a")
+ assert.Empty(t, directives.m)
+
+ val, ok := clonedDirectives.m["a"]
+ assert.Truef(t, ok, "directives map: %v", directives.m)
+ assert.Equal(t, "b", val)
+ }
+}
diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go
index 528d0e250bd..413e95d5c40 100644
--- a/go/vt/sqlparser/comments.go
+++ b/go/vt/sqlparser/comments.go
@@ -20,6 +20,9 @@ import (
"strconv"
"strings"
"unicode"
+
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vterrors"
)
const (
@@ -44,8 +47,19 @@ const (
DirectiveQueryPlanner = "PLANNER"
// DirectiveVtexplainRunDMLQueries tells explain format = vtexplain that it is okay to also run the query.
DirectiveVtexplainRunDMLQueries = "EXECUTE_DML_QUERIES"
+ // DirectiveWorkloadName specifies the name of the client application workload issuing the query.
+ DirectiveWorkloadName = "WORKLOAD_NAME"
+ // DirectivePriority specifies the priority of a workload. It should be an integer between 0 and MaxPriorityValue,
+ // where 0 is the highest priority, and MaxPriorityValue is the lowest one.
+ DirectivePriority = "PRIORITY"
+
+ // MaxPriorityValue specifies the maximum value allowed for the priority query directive. Valid priority values are
+ // between zero and MaxPriorityValue.
+ MaxPriorityValue = 100
)
+var ErrInvalidPriority = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Invalid priority value specified in query")
+
func isNonSpace(r rune) bool {
return !unicode.IsSpace(r)
}
@@ -206,6 +220,15 @@ type CommentDirectives struct {
m map[string]string
}
+// ResetDirectives sets the _directives member to `nil`, which means the next call to Directives()
+// will re-evaluate it.
+func (c *ParsedComments) ResetDirectives() {
+ if c == nil {
+ return
+ }
+ c._directives = nil
+}
+
// Directives parses the comment list for any execution directives
// of the form:
//
@@ -369,3 +392,42 @@ func AllowScatterDirective(stmt Statement) bool {
}
return comments != nil && comments.Directives().IsSet(DirectiveAllowScatter)
}
+
+// GetPriorityFromStatement gets the priority from the provided Statement, using DirectivePriority
+func GetPriorityFromStatement(statement Statement) (string, error) {
+ commentedStatement, ok := statement.(Commented)
+ // This would mean that the statement lacks comments, so we can't obtain the priority from it. Hence default to
+ // empty priority
+ if !ok {
+ return "", nil
+ }
+
+ directives := commentedStatement.GetParsedComments().Directives()
+ priority, ok := directives.GetString(DirectivePriority, "")
+ if !ok || priority == "" {
+ return "", nil
+ }
+
+ intPriority, err := strconv.Atoi(priority)
+ if err != nil || intPriority < 0 || intPriority > MaxPriorityValue {
+ return "", ErrInvalidPriority
+ }
+
+ return priority, nil
+}
+
+// GetWorkloadNameFromStatement gets the workload name from the provided Statement, using workloadLabel as the name of
+// the query directive that specifies it.
+func GetWorkloadNameFromStatement(statement Statement) string {
+ commentedStatement, ok := statement.(Commented)
+ // This would mean that the statement lacks comments, so we can't obtain the workload from it. Hence default to
+ // empty workload name
+ if !ok {
+ return ""
+ }
+
+ directives := commentedStatement.GetParsedComments().Directives()
+ workloadName, _ := directives.GetString(DirectiveWorkloadName, "")
+
+ return workloadName
+}
diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go
index 4906b9fbcd7..b839f78df66 100644
--- a/go/vt/sqlparser/comments_test.go
+++ b/go/vt/sqlparser/comments_test.go
@@ -468,3 +468,68 @@ func TestIgnoreMaxMaxMemoryRowsDirective(t *testing.T) {
})
}
}
+
+func TestGetPriorityFromStatement(t *testing.T) {
+ testCases := []struct {
+ query string
+ expectedPriority string
+ expectedError error
+ }{
+ {
+ query: "select * from a_table",
+ expectedPriority: "",
+ expectedError: nil,
+ },
+ {
+ query: "select /*vt+ ANOTHER_DIRECTIVE=324 */ * from another_table",
+ expectedPriority: "",
+ expectedError: nil,
+ },
+ {
+ query: "select /*vt+ PRIORITY=33 */ * from another_table",
+ expectedPriority: "33",
+ expectedError: nil,
+ },
+ {
+ query: "select /*vt+ PRIORITY=200 */ * from another_table",
+ expectedPriority: "",
+ expectedError: ErrInvalidPriority,
+ },
+ {
+ query: "select /*vt+ PRIORITY=-1 */ * from another_table",
+ expectedPriority: "",
+ expectedError: ErrInvalidPriority,
+ },
+ {
+ query: "select /*vt+ PRIORITY=some_text */ * from another_table",
+ expectedPriority: "",
+ expectedError: ErrInvalidPriority,
+ },
+ {
+ query: "select /*vt+ PRIORITY=0 */ * from another_table",
+ expectedPriority: "0",
+ expectedError: nil,
+ },
+ {
+ query: "select /*vt+ PRIORITY=100 */ * from another_table",
+ expectedPriority: "100",
+ expectedError: nil,
+ },
+ }
+
+ for _, testCase := range testCases {
+ theThestCase := testCase
+ t.Run(theThestCase.query, func(t *testing.T) {
+ t.Parallel()
+ stmt, err := Parse(theThestCase.query)
+ assert.NoError(t, err)
+ actualPriority, actualError := GetPriorityFromStatement(stmt)
+ if theThestCase.expectedError != nil {
+ assert.ErrorIs(t, actualError, theThestCase.expectedError)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, theThestCase.expectedPriority, actualPriority)
+ }
+ })
+ }
+}
diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go
index ec980a05c65..d0a152f907c 100644
--- a/go/vt/sqlparser/keywords.go
+++ b/go/vt/sqlparser/keywords.go
@@ -687,6 +687,7 @@ var keywords = []keyword{
// keywordStrings contains the reverse mapping of token to keyword strings
var keywordStrings = map[int]string{}
+var keywordVals = map[string]int{}
// keywordLookupTable is a perfect hash map that maps **case insensitive** keyword names to their ids
var keywordLookupTable *caseInsensitiveTable
@@ -735,6 +736,7 @@ func init() {
panic(fmt.Sprintf("keyword %q must be lowercase in table", kw.name))
}
keywordStrings[kw.id] = kw.name
+ keywordVals[kw.name] = kw.id
}
keywordLookupTable = buildCaseInsensitiveTable(keywords)
diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go
index 6fb93c5778d..2188b303eda 100644
--- a/go/vt/sqlparser/normalizer.go
+++ b/go/vt/sqlparser/normalizer.go
@@ -42,10 +42,11 @@ func Normalize(stmt Statement, reserved *ReservedVars, bindVars map[string]*quer
}
type normalizer struct {
- bindVars map[string]*querypb.BindVariable
- reserved *ReservedVars
- vals map[string]string
- err error
+ bindVars map[string]*querypb.BindVariable
+ reserved *ReservedVars
+ vals map[string]string
+ err error
+ inDerived bool
}
func newNormalizer(reserved *ReservedVars, bindVars map[string]*querypb.BindVariable) *normalizer {
@@ -65,8 +66,12 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool {
case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, *SetTransaction, DDLStatement, *SRollback, *Release, *OtherAdmin, *OtherRead:
return false
case *Select:
+ _, isDerived := cursor.Parent().(*DerivedTable)
+ var tmp bool
+ tmp, nz.inDerived = nz.inDerived, isDerived
_ = Rewrite(node, nz.WalkSelect, nil)
// Don't continue
+ nz.inDerived = tmp
return false
case *Literal:
nz.convertLiteral(node, cursor)
@@ -85,6 +90,19 @@ func (nz *normalizer) WalkStatement(cursor *Cursor) bool {
// WalkSelect normalizes the AST in Select mode.
func (nz *normalizer) WalkSelect(cursor *Cursor) bool {
switch node := cursor.Node().(type) {
+ case *Select:
+ _, isDerived := cursor.Parent().(*DerivedTable)
+ if !isDerived {
+ return true
+ }
+ var tmp bool
+ tmp, nz.inDerived = nz.inDerived, isDerived
+ _ = Rewrite(node, nz.WalkSelect, nil)
+ // Don't continue
+ nz.inDerived = tmp
+ return false
+ case SelectExprs:
+ return !nz.inDerived
case *Literal:
nz.convertLiteralDedup(node, cursor)
case *ComparisonExpr:
diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go
index aa47f1e5634..ab953084d71 100644
--- a/go/vt/sqlparser/normalizer_test.go
+++ b/go/vt/sqlparser/normalizer_test.go
@@ -284,6 +284,14 @@ func TestNormalize(t *testing.T) {
outbv: map[string]*querypb.BindVariable{
"bv1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Datetime, []byte("2022-08-06 17:05:12"))),
},
+ }, {
+ // we don't want to replace literals on the select expressions of a derived table
+ // these expressions can be referenced from the outside,
+ // and changing them to bindvars can change the meaning of the query
+ // example of problematic query: select tmp.`1` from (select 1) as tmp
+ in: `select * from (select 12) as t`,
+ outstmt: `select * from (select 12 from dual) as t`,
+ outbv: map[string]*querypb.BindVariable{},
}}
for _, tc := range testcases {
t.Run(tc.in, func(t *testing.T) {
diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go
index bb3ab071820..b1e660562eb 100644
--- a/go/vt/sqlparser/parse_test.go
+++ b/go/vt/sqlparser/parse_test.go
@@ -850,6 +850,12 @@ var (
}, {
input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t",
output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t",
+ }, {
+ input: "select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)",
+ output: "select DATE_ADD(min(FROM_UNIXTIME(1673444922)), interval (-DAYOFWEEK(min(FROM_UNIXTIME(1673444922))) + 1) DAY) from dual",
+ }, {
+ input: "select '2020-01-01' + interval month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month))-1 month",
+ output: "select '2020-01-01' + interval (month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month)) - 1) month from dual",
}, {
input: "select /* dual */ 1 from dual",
}, {
@@ -5413,17 +5419,7 @@ var (
"(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
"F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
"(F(F(F(F(F(F(F(F(F(F(F(F(",
- output: "max nesting level reached at position 406",
- }, {
- input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
- "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
- "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" +
- "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" +
- "(F(F(F(F(F(F(F(F(F(F(F(",
- output: "syntax error at position 404",
+ output: "syntax error at position 406",
}, {
// This construct is considered invalid due to a grammar conflict.
input: "insert into a select * from b join c on duplicate key update d=e",
diff --git a/go/vt/sqlparser/precedence.go b/go/vt/sqlparser/precedence.go
index e1beafef816..d63a56b62ef 100644
--- a/go/vt/sqlparser/precedence.go
+++ b/go/vt/sqlparser/precedence.go
@@ -59,7 +59,7 @@ func precedenceFor(in Expr) Precendence {
return P12
case *ComparisonExpr:
switch node.Operator {
- case EqualOp, NotEqualOp, GreaterThanOp, GreaterEqualOp, LessThanOp, LessEqualOp, LikeOp, InOp, RegexpOp:
+ case EqualOp, NotEqualOp, GreaterThanOp, GreaterEqualOp, LessThanOp, LessEqualOp, LikeOp, InOp, RegexpOp, NullSafeEqualOp:
return P11
}
case *IsExpr:
diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go
index cbc481bb4d3..215c9480823 100644
--- a/go/vt/sqlparser/precedence_test.go
+++ b/go/vt/sqlparser/precedence_test.go
@@ -198,6 +198,7 @@ func TestParens(t *testing.T) {
{in: "10 - 2 - 1", expected: "10 - 2 - 1"},
{in: "(10 - 2) - 1", expected: "10 - 2 - 1"},
{in: "10 - (2 - 1)", expected: "10 - (2 - 1)"},
+ {in: "0 <=> (1 and 0)", expected: "0 <=> (1 and 0)"},
}
for _, tc := range tests {
diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go
index 8fd76760a4d..b110bc8a2fc 100644
--- a/go/vt/sqlparser/sql.go
+++ b/go/vt/sqlparser/sql.go
@@ -22,18 +22,6 @@ func setDDL(yylex yyLexer, node Statement) {
yylex.(*Tokenizer).partialDDL = node
}
-func incNesting(yylex yyLexer) bool {
- yylex.(*Tokenizer).nesting++
- if yylex.(*Tokenizer).nesting == 200 {
- return true
- }
- return false
-}
-
-func decNesting(yylex yyLexer) {
- yylex.(*Tokenizer).nesting--
-}
-
// skipToEnd forces the lexer to end prematurely. Not all SQL statements
// are supported by the Parser, thus calling skipToEnd will make the lexer
// return EOF early.
@@ -1365,19 +1353,19 @@ var yyExca = [...]int{
135, 155,
176, 155,
339, 155,
- -2, 505,
+ -2, 506,
-1, 57,
- 36, 751,
- 238, 751,
- 249, 751,
- 284, 765,
- 285, 765,
- -2, 753,
+ 36, 752,
+ 238, 752,
+ 249, 752,
+ 284, 766,
+ 285, 766,
+ -2, 754,
-1, 62,
- 240, 782,
- -2, 780,
+ 240, 783,
+ -2, 781,
-1, 116,
- 237, 1438,
+ 237, 1439,
-2, 121,
-1, 118,
1, 148,
@@ -1386,42 +1374,42 @@ var yyExca = [...]int{
-1, 129,
136, 391,
243, 391,
- -2, 494,
+ -2, 495,
-1, 148,
135, 155,
176, 155,
339, 155,
- -2, 514,
+ -2, 515,
-1, 798,
- 87, 1455,
- -2, 1300,
- -1, 799,
87, 1456,
- 221, 1460,
-2, 1301,
+ -1, 799,
+ 87, 1457,
+ 221, 1461,
+ -2, 1302,
-1, 800,
- 221, 1459,
+ 221, 1460,
-2, 39,
-1, 880,
- 60, 851,
- -2, 866,
+ 60, 852,
+ -2, 867,
-1, 966,
248, 40,
253, 40,
-2, 402,
-1, 1051,
- 1, 562,
- 657, 562,
+ 1, 563,
+ 657, 563,
-2, 155,
-1, 1340,
- 221, 1460,
- -2, 1301,
+ 221, 1461,
+ -2, 1302,
-1, 1488,
- 60, 852,
- -2, 871,
- -1, 1489,
60, 853,
-2, 872,
+ -1, 1489,
+ 60, 854,
+ -2, 873,
-1, 1540,
135, 155,
176, 155,
@@ -1430,17 +1418,17 @@ var yyExca = [...]int{
-1, 1619,
136, 391,
243, 391,
- -2, 494,
+ -2, 495,
-1, 1628,
248, 41,
253, 41,
-2, 403,
-1, 1982,
- 221, 1464,
- -2, 1458,
+ 221, 1465,
+ -2, 1459,
-1, 1983,
- 221, 1460,
- -2, 1456,
+ 221, 1461,
+ -2, 1457,
-1, 2083,
135, 155,
176, 155,
@@ -1452,1039 +1440,967 @@ var yyExca = [...]int{
-1, 2451,
78, 95,
88, 95,
- -2, 930,
+ -2, 931,
-1, 2519,
- 632, 678,
- -2, 652,
+ 632, 679,
+ -2, 653,
-1, 2686,
- 50, 1397,
- -2, 1391,
+ 50, 1398,
+ -2, 1392,
-1, 3338,
- 632, 678,
- -2, 666,
- -1, 3426,
- 23, 1816,
- 33, 1816,
- 177, 1816,
- 260, 1816,
- 319, 1816,
- 320, 1816,
- 321, 1816,
- 322, 1816,
- 323, 1816,
- 324, 1816,
- 325, 1816,
- 327, 1816,
- 328, 1816,
- 329, 1816,
- 330, 1816,
- 331, 1816,
- 332, 1816,
- 333, 1816,
- 334, 1816,
- 335, 1816,
- 336, 1816,
- 337, 1816,
- 338, 1816,
- 340, 1816,
- 342, 1816,
- 343, 1816,
- 344, 1816,
- 345, 1816,
- 346, 1816,
- 347, 1816,
- 348, 1816,
- 349, 1816,
- 350, 1816,
- 353, 1816,
- 354, 1816,
- 355, 1816,
- 356, 1816,
- 357, 1816,
- 359, 1816,
- 360, 1816,
- 361, 1816,
- 362, 1816,
- 503, 1816,
- -2, 610,
+ 632, 679,
+ -2, 667,
+ -1, 3427,
+ 23, 1817,
+ 33, 1817,
+ 177, 1817,
+ 260, 1817,
+ 319, 1817,
+ 320, 1817,
+ 321, 1817,
+ 322, 1817,
+ 323, 1817,
+ 324, 1817,
+ 325, 1817,
+ 327, 1817,
+ 328, 1817,
+ 329, 1817,
+ 330, 1817,
+ 331, 1817,
+ 332, 1817,
+ 333, 1817,
+ 334, 1817,
+ 335, 1817,
+ 336, 1817,
+ 337, 1817,
+ 338, 1817,
+ 340, 1817,
+ 342, 1817,
+ 343, 1817,
+ 344, 1817,
+ 345, 1817,
+ 346, 1817,
+ 347, 1817,
+ 348, 1817,
+ 349, 1817,
+ 350, 1817,
+ 353, 1817,
+ 354, 1817,
+ 355, 1817,
+ 356, 1817,
+ 357, 1817,
+ 359, 1817,
+ 360, 1817,
+ 361, 1817,
+ 362, 1817,
+ 503, 1817,
+ -2, 611,
}
const yyPrivate = 57344
-const yyLast = 47864
+const yyLast = 47193
var yyAct = [...]int{
- 1496, 3085, 3497, 3086, 3087, 3319, 3467, 3424, 3508, 3466,
- 802, 672, 3403, 3056, 1543, 652, 2913, 3369, 809, 2031,
- 801, 2835, 3392, 1849, 2738, 2745, 2011, 3303, 3251, 2795,
- 2080, 2800, 2797, 2796, 2794, 2799, 2798, 5, 2786, 3301,
- 2351, 3043, 1114, 2699, 3115, 873, 2703, 2702, 2645, 3291,
- 2385, 2815, 2013, 2150, 654, 2814, 2700, 2952, 2424, 2580,
- 3120, 764, 2946, 2051, 2753, 682, 2817, 1503, 2972, 2054,
- 2697, 763, 762, 2687, 2035, 2411, 1973, 1063, 650, 928,
- 2516, 2938, 769, 2113, 2484, 2841, 2564, 2138, 2118, 2485,
- 1597, 2486, 2436, 768, 2181, 2068, 39, 2417, 2403, 157,
- 1644, 2387, 38, 2056, 40, 1845, 2055, 1944, 1864, 1092,
- 2556, 896, 897, 875, 1490, 2159, 2198, 143, 2043, 998,
- 2137, 2120, 1803, 1626, 2478, 961, 956, 2453, 1532, 1970,
- 1512, 2058, 1116, 1470, 664, 1352, 1822, 1868, 647, 1743,
- 1280, 1633, 935, 1978, 2135, 932, 1725, 967, 936, 659,
- 2109, 1531, 98, 99, 1517, 974, 962, 1747, 963, 887,
- 1940, 1336, 884, 94, 1312, 79, 10, 93, 914, 9,
- 916, 8, 1112, 877, 964, 881, 1106, 1752, 1592, 1618,
- 1047, 885, 126, 127, 882, 100, 883, 909, 161, 658,
- 121, 119, 120, 641, 899, 2036, 78, 1356, 3328, 92,
- 2509, 3498, 1360, 101, 2152, 2153, 2154, 3354, 3044, 2783,
- 2152, 87, 1943, 2507, 904, 908, 3164, 3166, 3165, 3183,
- 3184, 3185, 3186, 3187, 3188, 3189, 704, 2539, 2538, 2196,
- 128, 1710, 3036, 3355, 621, 3450, 1877, 2999, 122, 929,
- 2572, 900, 2573, 89, 906, 906, 89, 3090, 1003, 3349,
- 3350, 2239, 1810, 588, 1809, 1808, 627, 890, 89, 1281,
- 923, 642, 891, 3090, 874, 1000, 818, 819, 820, 818,
- 819, 820, 1807, 1979, 1806, 954, 876, 1825, 1017, 1018,
- 1019, 1805, 1022, 1023, 1024, 1025, 978, 2, 1028, 1029,
- 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
- 1040, 1041, 1042, 1043, 1044, 977, 924, 122, 1011, 952,
- 951, 950, 898, 2805, 1783, 3404, 3445, 184, 953, 644,
- 1497, 645, 2008, 2009, 1004, 1007, 1008, 2383, 2512, 105,
- 106, 107, 2683, 110, 3089, 1281, 116, 1297, 2413, 185,
- 2529, 123, 583, 145, 2185, 2805, 3518, 2649, 3350, 940,
- 3089, 3454, 3452, 640, 166, 3465, 3488, 1020, 2802, 2918,
- 2917, 2532, 869, 870, 871, 872, 1291, 945, 880, 2803,
- 2183, 651, 922, 766, 767, 122, 3453, 3451, 3304, 2352,
- 1466, 1815, 2129, 2860, 3247, 156, 3246, 89, 2184, 3049,
- 3410, 144, 3050, 3410, 1002, 2809, 911, 912, 1001, 3479,
- 80, 2803, 80, 80, 3257, 2123, 3448, 3256, 3068, 3057,
- 163, 2248, 3393, 164, 621, 3400, 621, 2178, 80, 3067,
- 1854, 82, 3470, 3429, 2427, 2880, 1607, 2809, 3333, 2546,
- 2547, 2384, 1620, 1621, 155, 154, 183, 2734, 622, 922,
- 766, 767, 1291, 2571, 3170, 2735, 2736, 2075, 2076, 2428,
- 646, 3133, 2462, 2245, 2074, 2461, 2039, 1082, 2463, 3178,
- 3179, 1533, 1313, 1534, 2555, 867, 866, 1287, 1070, 3320,
- 1279, 2510, 1294, 1071, 1295, 1296, 2246, 2474, 89, 2949,
- 89, 89, 635, 621, 2857, 1314, 1315, 1316, 1317, 1318,
- 1319, 1320, 1322, 1321, 1323, 1324, 89, 2614, 2093, 2092,
- 2420, 2421, 1070, 2868, 1050, 2866, 2806, 1071, 2240, 2241,
- 2243, 2242, 1087, 1088, 1109, 1069, 1791, 1068, 639, 1530,
- 621, 770, 633, 683, 774, 685, 771, 772, 2010, 681,
- 684, 773, 2842, 2557, 1474, 149, 1622, 152, 2806, 1619,
- 1083, 150, 151, 1287, 3279, 2160, 3280, 167, 2517, 1076,
- 2837, 2122, 2542, 1046, 2199, 3500, 173, 702, 703, 3163,
- 3167, 3168, 3169, 3180, 3181, 3182, 3190, 3192, 735, 3191,
- 3193, 3194, 3195, 3198, 3199, 3200, 3201, 3196, 3197, 3202,
- 3147, 3151, 3148, 3149, 3150, 3162, 3152, 3153, 3154, 3155,
- 3156, 3157, 3158, 3159, 3160, 3161, 3203, 3204, 3205, 3206,
- 3207, 3208, 3173, 3177, 3176, 3174, 3175, 3171, 3172, 1089,
- 949, 3471, 1056, 1057, 621, 1726, 1700, 2830, 622, 1090,
- 622, 775, 1108, 776, 1091, 2831, 780, 1084, 1085, 1086,
- 782, 781, 3472, 783, 749, 748, 1077, 2838, 777, 778,
- 3446, 779, 2037, 2038, 1059, 1103, 2219, 1052, 2220, 621,
- 2221, 1731, 2559, 3038, 3037, 1021, 2615, 2222, 949, 1045,
- 1701, 915, 1702, 1027, 1026, 2839, 2201, 1066, 947, 1072,
- 1073, 1074, 1075, 2204, 2203, 158, 1330, 1331, 1332, 1333,
- 3231, 2163, 925, 919, 917, 2648, 1344, 622, 3094, 1348,
- 3034, 2039, 1110, 1111, 2052, 1477, 1286, 1283, 1284, 1285,
- 1290, 1292, 1289, 958, 1288, 987, 3209, 3210, 3211, 3212,
- 3213, 3214, 3215, 3216, 1282, 985, 2202, 3327, 1104, 2508,
- 3434, 627, 944, 1049, 622, 946, 996, 995, 2205, 1338,
- 955, 2211, 2207, 2209, 2210, 2208, 2212, 2213, 3432, 957,
- 994, 1529, 993, 958, 992, 991, 990, 3438, 3439, 925,
- 919, 917, 1611, 153, 989, 2755, 2756, 2511, 984, 2544,
- 997, 2757, 3433, 933, 3406, 621, 3512, 3406, 970, 2950,
- 3519, 2182, 1286, 1283, 1284, 1285, 1290, 1292, 1289, 1267,
- 1288, 815, 3477, 933, 815, 1327, 1327, 1298, 948, 969,
- 1282, 2476, 1268, 1269, 3405, 2246, 815, 3405, 2563, 1632,
- 933, 1334, 1744, 3033, 931, 1006, 2136, 618, 2560, 2126,
- 910, 969, 976, 2189, 146, 1005, 1353, 147, 622, 2188,
- 1740, 1048, 949, 1272, 941, 1014, 2776, 2858, 2541, 1080,
- 3088, 943, 942, 1605, 1604, 1603, 948, 1712, 1711, 1713,
- 1714, 1715, 2527, 2997, 2998, 1741, 3088, 159, 3066, 2127,
- 1601, 988, 83, 622, 171, 604, 2125, 587, 582, 976,
- 2531, 986, 2754, 1734, 2180, 1732, 1733, 602, 1735, 1736,
- 2576, 2260, 2388, 2390, 2757, 1328, 1329, 2037, 2038, 1358,
- 947, 1359, 2554, 1498, 1500, 2553, 3316, 2986, 1362, 2968,
- 2128, 2458, 918, 2247, 2423, 179, 88, 2360, 88, 88,
- 2124, 1730, 2807, 2808, 2530, 1857, 1631, 599, 1464, 1521,
- 1429, 1061, 2418, 975, 88, 2811, 613, 1465, 118, 969,
- 972, 973, 1481, 933, 2081, 1327, 1324, 966, 970, 1067,
- 2902, 609, 2733, 1055, 2807, 2808, 160, 165, 162, 168,
- 169, 170, 172, 174, 175, 176, 177, 2811, 965, 893,
- 1107, 1058, 178, 180, 181, 182, 1093, 1753, 2566, 918,
- 975, 2566, 113, 2565, 3341, 999, 2565, 3029, 1480, 622,
- 2962, 1065, 1484, 1435, 1436, 1437, 1438, 1439, 877, 1878,
- 1099, 1465, 1101, 2200, 3510, 1478, 1800, 3511, 1737, 3509,
- 1482, 1483, 1535, 1879, 98, 99, 2600, 1297, 1471, 1458,
- 948, 976, 589, 1869, 591, 605, 1869, 624, 2277, 623,
- 595, 2500, 593, 597, 606, 598, 2875, 592, 1297, 603,
- 1098, 1100, 594, 607, 608, 611, 614, 615, 616, 612,
- 610, 1079, 601, 625, 114, 1295, 1296, 1013, 2389, 1638,
- 1513, 3480, 1081, 1296, 1624, 101, 3129, 3004, 1297, 1727,
- 1499, 1728, 3003, 2167, 1729, 1817, 1819, 1820, 2179, 1468,
- 1641, 2581, 874, 1640, 1630, 2177, 1673, 2172, 1502, 1676,
- 2304, 1678, 2175, 1608, 1609, 1610, 987, 1599, 1617, 1818,
- 976, 876, 1497, 1695, 985, 1646, 1479, 1647, 1677, 1649,
- 1651, 2172, 3473, 1655, 1657, 1659, 1661, 1663, 1636, 1526,
- 1527, 2987, 975, 1094, 1754, 1051, 2176, 979, 969, 889,
- 1827, 3514, 981, 3371, 1064, 1635, 982, 980, 3520, 1096,
- 1600, 976, 1876, 1097, 1828, 1325, 1326, 1826, 1634, 1634,
- 2174, 3239, 1294, 1102, 1295, 1296, 3063, 983, 3064, 1614,
- 1685, 1686, 1615, 1613, 2583, 1627, 1691, 1692, 976, 2252,
- 2253, 2254, 3309, 1294, 1757, 1295, 1296, 1095, 3372, 1720,
- 1718, 1761, 2602, 1763, 1764, 1765, 1766, 1485, 3238, 3229,
- 1770, 3437, 1319, 1320, 1322, 1321, 1323, 1324, 1755, 1756,
- 3079, 975, 1782, 1294, 1707, 1295, 1296, 969, 972, 973,
- 3078, 933, 1760, 1918, 1749, 966, 970, 3310, 1745, 1767,
- 1768, 1769, 3011, 3010, 1751, 3521, 818, 819, 820, 3000,
- 1681, 2593, 2592, 2591, 2585, 3436, 2589, 1297, 2584, 2784,
- 2582, 2772, 975, 1719, 1717, 2587, 1297, 979, 969, 2482,
- 2481, 1497, 981, 1606, 2586, 2132, 982, 980, 1874, 2265,
- 122, 626, 952, 951, 950, 1721, 1705, 1875, 1706, 975,
- 1704, 1012, 2588, 2590, 1703, 1009, 1759, 1693, 1687, 1684,
- 1297, 1683, 619, 1314, 1315, 1316, 1317, 1318, 1319, 1320,
- 1322, 1321, 1323, 1324, 1682, 1780, 1653, 620, 2994, 627,
- 1781, 1975, 1910, 1899, 1900, 1901, 1902, 1912, 1903, 1904,
- 1905, 1917, 1913, 1906, 1907, 1914, 1915, 1916, 1908, 1909,
- 1911, 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324, 1313,
- 3484, 1497, 1309, 1796, 1310, 1315, 1316, 1317, 1318, 1319,
- 1320, 1322, 1321, 1323, 1324, 2834, 1271, 1831, 1311, 1325,
- 1326, 1308, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322,
- 1321, 1323, 1324, 627, 3482, 1497, 1530, 1852, 1852, 1850,
- 1850, 3474, 1294, 1853, 1295, 1296, 95, 1313, 1872, 2575,
- 3336, 1294, 1873, 1295, 1296, 2465, 627, 96, 1297, 39,
- 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1299, 1313, 1821,
- 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323,
- 1324, 1823, 2044, 2045, 3335, 1294, 1936, 1295, 1296, 1338,
- 2316, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321,
- 1323, 1324, 2148, 2147, 104, 3313, 1870, 2146, 2145, 2144,
- 2143, 3312, 1497, 1497, 1968, 103, 3311, 102, 1313, 3234,
- 2259, 3218, 1811, 1812, 1813, 1814, 97, 1506, 79, 1788,
- 1789, 1464, 1799, 1824, 3217, 1797, 3128, 1798, 3126, 1997,
- 1465, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321,
- 1323, 1324, 95, 3075, 1975, 2314, 1498, 2004, 1972, 97,
- 1463, 1297, 1980, 96, 1829, 1293, 1497, 1974, 2409, 3499,
- 3329, 1858, 1859, 1507, 3461, 1497, 1861, 1462, 906, 906,
- 1866, 813, 1293, 1497, 1871, 1497, 3265, 1971, 1461, 1856,
- 2028, 2409, 3399, 1294, 3008, 1295, 1296, 1884, 1885, 1886,
- 1887, 1888, 1889, 1890, 1891, 1892, 1893, 2409, 3379, 2409,
- 3375, 1919, 1920, 1921, 1922, 1923, 1924, 1926, 1497, 1931,
- 2993, 1933, 1934, 1935, 1297, 1937, 1938, 1939, 2843, 1945,
- 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955,
- 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965,
- 1966, 1967, 1982, 1969, 1980, 1976, 1977, 906, 2840, 906,
- 906, 906, 906, 906, 2090, 1985, 1986, 98, 99, 1989,
- 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1981, 1998, 1999,
- 2000, 2001, 2002, 2021, 2775, 2022, 2774, 98, 99, 104,
- 2133, 1880, 1881, 1882, 1883, 2027, 1294, 2491, 1295, 1296,
- 103, 2479, 102, 3362, 1497, 1894, 3047, 3326, 3416, 1497,
- 2062, 2085, 1460, 1863, 1865, 2194, 906, 1297, 2193, 2099,
- 2100, 2101, 2102, 2003, 2015, 3242, 1497, 2409, 3230, 2094,
- 2084, 2095, 2096, 2097, 2098, 2034, 1297, 2016, 2040, 2041,
- 2066, 3047, 1497, 3264, 1982, 2409, 3045, 2105, 2106, 2107,
- 2108, 2029, 1784, 890, 2172, 1497, 3222, 2088, 1750, 1294,
- 1716, 1295, 1296, 2047, 2079, 2115, 97, 2161, 923, 2049,
- 1497, 2072, 1830, 2121, 1832, 1833, 1834, 1835, 1836, 1837,
- 1838, 1839, 1840, 1841, 1842, 1843, 1844, 2071, 2087, 2070,
- 2086, 2966, 1497, 2158, 1984, 1297, 2454, 1987, 1988, 2325,
- 1497, 3414, 1497, 2765, 2764, 3221, 2131, 2761, 2762, 2761,
- 2760, 2433, 1497, 2454, 924, 1297, 2246, 2540, 1596, 2521,
- 3412, 1497, 1297, 2117, 184, 1497, 2312, 1708, 2116, 2111,
- 2112, 2514, 2515, 2409, 2408, 2130, 1698, 2134, 2270, 1497,
- 2425, 2142, 2405, 1694, 2166, 1690, 2186, 2169, 123, 2170,
- 1689, 2026, 1294, 1932, 1295, 1296, 1297, 97, 1688, 2455,
- 1930, 166, 2165, 2116, 2168, 2164, 1313, 1508, 978, 2457,
- 1941, 1294, 1105, 1295, 1296, 3055, 2455, 2190, 1634, 3288,
- 1497, 2191, 2192, 2187, 2518, 1297, 2246, 977, 2496, 1314,
- 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324,
- 1497, 1297, 1855, 1497, 2467, 1297, 3286, 1497, 1596, 1595,
- 1541, 1540, 2433, 2432, 2698, 2270, 2274, 163, 2089, 2728,
- 164, 2961, 1293, 2232, 2233, 2961, 2425, 103, 2235, 2246,
- 1294, 2963, 1295, 1296, 2257, 3367, 2197, 2236, 2005, 3340,
- 3283, 1497, 2173, 183, 1941, 2409, 2433, 2263, 2922, 2763,
- 1294, 2268, 1295, 1296, 2271, 2671, 2272, 1294, 2073, 1295,
- 1296, 2279, 2270, 2325, 2301, 2281, 2282, 2283, 2433, 3269,
- 1497, 2225, 2300, 2172, 2155, 2289, 2290, 2291, 2292, 2293,
- 2294, 2295, 2296, 2297, 2298, 2937, 1497, 1297, 2042, 2273,
- 2264, 1294, 1501, 1295, 1296, 2006, 1293, 1297, 2961, 1823,
- 2172, 1855, 1801, 1739, 3012, 1528, 1297, 2262, 960, 959,
- 879, 2305, 2306, 2307, 2308, 2309, 89, 2311, 1297, 3442,
- 1294, 2313, 1295, 1296, 3382, 2318, 2319, 3253, 2320, 1297,
- 1504, 2323, 2244, 2324, 1297, 3219, 1294, 2327, 1295, 1296,
- 1294, 2331, 1295, 1296, 3140, 2336, 2337, 2338, 2339, 3028,
- 3025, 1824, 1297, 2255, 167, 3013, 3014, 3015, 2350, 2787,
- 2353, 2354, 3006, 173, 1669, 1297, 2885, 2884, 2356, 2358,
- 2488, 2930, 1497, 1598, 1297, 2361, 2362, 2363, 2364, 2365,
- 1982, 2927, 1497, 2114, 1297, 2832, 2372, 2373, 89, 2374,
- 2925, 1497, 2377, 2379, 2028, 2276, 2381, 2789, 2278, 1297,
- 2785, 1050, 2890, 1497, 1297, 1981, 2393, 2522, 1297, 2285,
- 2286, 2287, 2288, 2873, 1497, 1670, 1671, 1672, 2380, 1497,
- 2110, 1297, 1294, 2104, 1295, 1296, 1852, 2103, 1850, 1723,
- 1297, 2394, 1294, 1629, 1295, 1296, 2378, 1497, 1625, 1510,
- 1594, 1294, 115, 1295, 1296, 2836, 1297, 3016, 3494, 2357,
- 1497, 2310, 3254, 1294, 1353, 1295, 1296, 2392, 2340, 1497,
- 2973, 2974, 2487, 2129, 1294, 1297, 1295, 1296, 3260, 1294,
- 2019, 1295, 1296, 2267, 3492, 3468, 1665, 1297, 1786, 2429,
- 3348, 3344, 158, 2266, 2395, 1297, 2397, 1294, 2981, 1295,
- 1296, 3274, 2332, 1497, 3017, 3018, 3019, 2976, 1297, 2410,
- 1294, 2781, 1295, 1296, 2780, 1509, 2779, 2698, 1297, 1294,
- 2488, 1295, 1296, 2501, 2226, 1497, 2979, 2978, 2717, 1294,
- 2448, 1295, 1296, 1666, 1667, 1668, 2419, 2716, 1471, 3255,
- 2382, 3475, 2406, 1297, 1294, 39, 1295, 1296, 2033, 1294,
- 1787, 1295, 1296, 1294, 2447, 1295, 1296, 2449, 1505, 2025,
- 3324, 2967, 2720, 1465, 1472, 2407, 1294, 2721, 1295, 1296,
- 2475, 2477, 3226, 2422, 2676, 1294, 2513, 1295, 1296, 1297,
- 3030, 1513, 2718, 2468, 3110, 1297, 3109, 2719, 2452, 2675,
- 2402, 1294, 2932, 1295, 1296, 2456, 3308, 2954, 1297, 2256,
- 3119, 2258, 2459, 2483, 3121, 2953, 2957, 2121, 894, 2466,
- 1294, 2469, 1295, 1296, 1297, 2722, 895, 2442, 2443, 2685,
- 585, 2537, 1294, 1738, 1295, 1296, 2759, 2928, 865, 2480,
- 1294, 2472, 1295, 1296, 3108, 2688, 2690, 2492, 868, 1016,
- 1297, 2489, 2490, 1294, 2691, 1295, 1296, 2493, 2494, 1015,
- 2535, 2498, 2497, 1294, 95, 1295, 1296, 2851, 2284, 1297,
- 2502, 2503, 2504, 2900, 159, 96, 2487, 2569, 2534, 2896,
- 1878, 171, 1270, 1617, 2528, 2299, 2578, 2959, 1294, 934,
- 1295, 1296, 2882, 95, 1879, 2604, 2605, 2606, 2607, 2608,
- 97, 2523, 2524, 123, 96, 2044, 2045, 1297, 2881, 97,
- 3506, 2777, 2533, 2229, 2613, 3421, 3325, 1297, 104, 3249,
- 2758, 2446, 179, 2030, 1294, 1297, 1295, 1296, 2218, 103,
- 1294, 102, 1295, 1296, 2878, 2217, 2594, 2939, 2558, 2674,
- 97, 902, 903, 1294, 2216, 1295, 1296, 2673, 2577, 3296,
- 2215, 2567, 2214, 2376, 2568, 2609, 2250, 102, 2561, 1294,
- 3295, 1295, 1296, 160, 165, 162, 168, 169, 170, 172,
- 174, 175, 176, 177, 3277, 3127, 3125, 3124, 3117, 178,
- 180, 181, 182, 2595, 3026, 1294, 2958, 1295, 1296, 2597,
- 2956, 2375, 2438, 2441, 2442, 2443, 2439, 104, 2440, 2444,
- 2574, 2371, 2973, 2974, 1294, 2650, 1295, 1296, 103, 2370,
- 102, 2790, 2156, 2655, 1612, 2652, 104, 901, 103, 906,
- 3116, 2947, 2598, 2599, 3495, 2425, 2601, 103, 3098, 2603,
- 2618, 3496, 3495, 3, 2405, 2616, 2302, 1971, 2017, 1971,
- 1522, 2707, 1294, 2624, 1295, 1296, 1297, 1514, 3496, 2610,
- 2611, 2612, 1294, 3314, 1295, 1296, 108, 109, 2992, 2725,
- 1294, 2617, 1295, 1296, 2619, 2620, 2621, 892, 91, 1,
- 2622, 2623, 2678, 2996, 1945, 2625, 2655, 3431, 2627, 600,
- 2007, 2629, 2630, 2631, 2632, 2679, 2710, 1469, 2727, 2633,
- 1945, 1945, 1945, 1945, 1945, 2651, 3469, 2653, 2666, 2701,
- 3427, 3428, 2670, 1709, 2701, 1699, 3058, 1942, 906, 2062,
- 3250, 2677, 2793, 2654, 2162, 2656, 2657, 2658, 2659, 2660,
- 2661, 2704, 2692, 2693, 2662, 2663, 3024, 2664, 2119, 2665,
- 2369, 2680, 968, 2729, 881, 148, 2730, 2062, 2062, 2062,
- 2062, 2062, 2709, 882, 2082, 883, 2695, 2723, 2712, 2713,
- 2711, 2715, 2813, 2714, 2667, 2668, 2669, 2062, 2083, 3395,
- 2062, 112, 926, 2731, 2634, 2635, 2636, 2637, 2638, 111,
- 98, 99, 971, 1078, 2696, 2157, 3048, 2737, 2473, 2091,
- 1547, 1294, 1545, 1295, 1296, 2853, 2769, 2768, 2767, 2792,
- 1546, 2438, 2441, 2442, 2443, 2439, 1297, 2440, 2444, 2726,
- 1544, 1749, 2855, 1549, 2791, 2870, 2871, 2872, 1548, 2874,
- 2876, 2820, 2821, 799, 2859, 1297, 2303, 2770, 2771, 2121,
- 2812, 2901, 1790, 2883, 2827, 634, 2445, 628, 2887, 2888,
- 2889, 2891, 2892, 2893, 2894, 186, 1536, 2895, 1297, 2897,
- 2898, 2899, 1515, 2579, 2903, 2904, 2905, 2906, 2907, 2908,
- 2909, 2910, 2911, 2912, 2844, 2849, 2850, 2847, 2846, 2788,
- 2596, 2914, 2919, 1010, 2854, 2923, 189, 2924, 2926, 189,
- 2929, 2931, 632, 2933, 2934, 2935, 2936, 638, 590, 2864,
- 2368, 2942, 2861, 2862, 2766, 2863, 2195, 189, 2865, 596,
- 2867, 1345, 2869, 1297, 1785, 2672, 2916, 2460, 1297, 2367,
- 921, 913, 189, 2920, 1297, 2018, 2396, 920, 3227, 2706,
- 2951, 1297, 2684, 2686, 2412, 2689, 2964, 2965, 2682, 2626,
- 2969, 2628, 2366, 1297, 3307, 3118, 3380, 638, 189, 638,
- 2470, 1294, 1511, 1295, 1296, 2921, 2275, 2639, 2640, 2641,
- 2642, 1867, 1297, 1335, 2059, 3093, 2945, 1816, 656, 655,
- 1294, 2877, 1295, 1296, 1297, 653, 2398, 1054, 2943, 1060,
- 2940, 2941, 1062, 2426, 1300, 2886, 1297, 803, 2948, 2386,
- 2955, 1523, 1297, 1294, 2437, 1295, 1296, 2355, 1297, 2970,
- 2960, 2435, 2349, 2434, 1297, 2227, 2067, 2975, 2348, 2971,
- 1297, 3423, 2061, 2977, 2057, 2347, 2404, 754, 2984, 2985,
- 753, 2982, 2980, 665, 657, 649, 1297, 2346, 2983, 752,
- 751, 1297, 2989, 2819, 3407, 1274, 1297, 2990, 2991, 3031,
- 3032, 1297, 2543, 2820, 2821, 3046, 2345, 1297, 1294, 2833,
- 1295, 1296, 2062, 1294, 2545, 1295, 1296, 2471, 2344, 1294,
- 2829, 1295, 1296, 3052, 3053, 3007, 1294, 3009, 1295, 1296,
- 2343, 1297, 1278, 1487, 643, 939, 2342, 2856, 1294, 3331,
- 1295, 1296, 2341, 2249, 2879, 1486, 1897, 3065, 2335, 1898,
- 3069, 1297, 3338, 2801, 2334, 3042, 2782, 1294, 2519, 1295,
- 1296, 3001, 3002, 2149, 65, 43, 3302, 3368, 750, 1294,
- 2333, 1295, 1296, 747, 3095, 2330, 3096, 3080, 3097, 1297,
- 2329, 1294, 2646, 1295, 1296, 2328, 3054, 1294, 1297, 1295,
- 1296, 2326, 3084, 1294, 2647, 1295, 1296, 3351, 3352, 1294,
- 746, 1295, 1296, 3353, 3092, 1294, 1925, 1295, 1296, 1275,
- 1297, 3444, 3099, 3035, 3027, 2322, 1792, 3039, 3040, 3041,
- 90, 1294, 34, 1295, 1296, 33, 1294, 3070, 1295, 1296,
- 32, 1294, 31, 1295, 1296, 2321, 1294, 30, 1295, 1296,
- 25, 24, 1294, 23, 1295, 1296, 22, 3051, 21, 27,
- 1297, 20, 3091, 19, 18, 2804, 3464, 3505, 117, 3113,
- 52, 49, 47, 2317, 125, 124, 1294, 50, 1295, 1296,
- 46, 1053, 2315, 44, 29, 28, 17, 16, 15, 14,
- 13, 3074, 12, 11, 7, 6, 1294, 37, 1295, 1296,
- 1852, 36, 1850, 3134, 2280, 3142, 35, 1795, 3114, 3123,
- 3132, 3122, 3071, 26, 3072, 3136, 3138, 3073, 3130, 4,
- 3076, 3077, 2506, 2701, 1294, 2151, 1295, 1296, 0, 3081,
- 0, 0, 0, 1294, 0, 1295, 1296, 0, 0, 3083,
- 0, 3143, 3144, 3241, 2269, 0, 2704, 3146, 3228, 0,
- 2704, 0, 3248, 0, 0, 1294, 0, 1295, 1296, 39,
- 0, 3100, 0, 0, 3101, 0, 3102, 3103, 0, 3104,
- 0, 3105, 3258, 3259, 1525, 3261, 3106, 3262, 3263, 0,
- 3225, 3224, 3266, 3267, 3268, 0, 3270, 3273, 3223, 3271,
- 3272, 0, 1542, 0, 3240, 1294, 0, 1295, 1296, 3245,
- 3252, 3131, 3282, 3284, 3285, 3287, 3289, 3290, 3292, 1852,
- 3244, 1850, 3139, 0, 3275, 3141, 0, 3232, 0, 0,
- 0, 0, 0, 0, 0, 1495, 1491, 3145, 0, 0,
- 0, 0, 1495, 1491, 0, 0, 0, 0, 0, 3276,
- 1492, 0, 0, 0, 3278, 3220, 3322, 1492, 3281, 0,
- 0, 0, 0, 0, 0, 0, 3235, 3236, 3237, 3300,
- 3318, 3297, 3298, 0, 3299, 2023, 2024, 1494, 0, 1493,
- 0, 0, 1488, 1489, 1494, 1679, 1493, 0, 0, 0,
- 0, 3315, 0, 0, 0, 0, 189, 0, 189, 3321,
- 184, 189, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1724, 0, 0, 0, 123, 0, 2704, 0, 0, 0,
- 0, 638, 0, 638, 638, 0, 0, 166, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 2741, 0, 3323,
- 0, 0, 1758, 638, 189, 0, 0, 0, 0, 1762,
- 3317, 0, 0, 0, 0, 3305, 0, 0, 0, 0,
- 1773, 1774, 1775, 1776, 1777, 1778, 1779, 0, 0, 0,
- 0, 1340, 3347, 3342, 0, 0, 0, 3082, 0, 0,
- 0, 0, 2742, 163, 3337, 0, 164, 3339, 3334, 0,
+ 1496, 3085, 3086, 809, 1849, 3087, 3500, 3319, 801, 802,
+ 3511, 672, 3404, 3468, 2080, 3469, 2913, 3425, 3056, 2031,
+ 1116, 1543, 3369, 2835, 1747, 3392, 2011, 2738, 651, 2745,
+ 3303, 3251, 2795, 2699, 2800, 2797, 2796, 2794, 1114, 2799,
+ 2786, 3301, 2798, 2351, 873, 3043, 896, 3115, 2814, 2815,
+ 2013, 2645, 3291, 2150, 2385, 764, 2702, 654, 2952, 2703,
+ 5, 2424, 2580, 2700, 2753, 2946, 682, 3120, 2051, 1503,
+ 2972, 763, 762, 2697, 769, 2817, 2687, 2411, 2938, 1063,
+ 650, 2484, 2516, 768, 2564, 652, 39, 2054, 2035, 2113,
+ 2485, 2841, 2138, 2118, 928, 998, 2486, 2181, 1597, 2068,
+ 897, 875, 2436, 1644, 1973, 40, 2403, 38, 1978, 2055,
+ 2417, 2387, 1943, 1944, 1970, 2056, 1864, 1490, 1845, 1092,
+ 2556, 2159, 143, 2137, 2043, 2198, 1803, 2120, 2478, 1532,
+ 961, 956, 2453, 664, 1512, 94, 157, 1868, 1626, 2058,
+ 98, 99, 1352, 1470, 1280, 1822, 1633, 935, 932, 2135,
+ 967, 936, 964, 1743, 1725, 2109, 962, 963, 914, 1531,
+ 1517, 916, 887, 877, 1940, 881, 1877, 1979, 1336, 974,
+ 1312, 659, 10, 1106, 161, 101, 2036, 884, 1112, 1752,
+ 79, 9, 8, 1592, 899, 1618, 121, 883, 119, 882,
+ 120, 93, 126, 127, 1047, 909, 87, 885, 658, 1360,
+ 3328, 78, 92, 2509, 1356, 3501, 2152, 2153, 2154, 3354,
+ 100, 3044, 2783, 2152, 2539, 2538, 2196, 2507, 3036, 89,
+ 1710, 1281, 904, 908, 122, 2999, 89, 89, 641, 890,
+ 3452, 818, 819, 820, 588, 3355, 128, 2572, 2573, 3090,
+ 3350, 1000, 1281, 627, 929, 1825, 818, 819, 820, 3349,
+ 1810, 2008, 2009, 3090, 1017, 1018, 1019, 2239, 1022, 1023,
+ 1024, 1025, 2805, 1809, 1028, 1029, 1030, 1031, 1032, 1033,
+ 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043,
+ 1044, 876, 923, 977, 1808, 1003, 891, 924, 184, 874,
+ 2, 1807, 1806, 122, 621, 3405, 642, 898, 1805, 953,
+ 978, 1783, 1004, 1007, 1008, 952, 644, 951, 645, 950,
+ 1497, 3446, 123, 2383, 2683, 627, 2185, 184, 2803, 3472,
+ 1466, 945, 1011, 3521, 3467, 166, 3089, 3456, 1291, 922,
+ 766, 767, 105, 106, 107, 954, 110, 1297, 1020, 116,
+ 3089, 123, 185, 145, 2809, 583, 3491, 940, 3350, 1291,
+ 3454, 2649, 3455, 2918, 166, 3411, 640, 2413, 2917, 621,
+ 2184, 122, 2532, 3304, 2352, 869, 870, 871, 872, 2529,
+ 3411, 880, 2183, 621, 2805, 3453, 1815, 2860, 3247, 2129,
+ 3246, 163, 80, 3049, 164, 156, 3050, 2802, 1002, 1001,
+ 646, 144, 3482, 3257, 2248, 1313, 3450, 3068, 80, 911,
+ 912, 618, 2123, 3057, 3333, 80, 2427, 183, 82, 3393,
+ 163, 3401, 3256, 164, 2178, 3067, 89, 2857, 1314, 1315,
+ 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324, 1287,
+ 2803, 2428, 132, 133, 155, 154, 183, 1854, 3430, 621,
+ 2880, 1607, 2546, 2547, 1533, 2462, 1534, 3133, 2461, 604,
+ 1287, 2463, 2384, 1279, 80, 2806, 2809, 2010, 2074, 2246,
+ 89, 602, 2735, 2736, 2734, 922, 766, 767, 2075, 2076,
+ 2571, 2555, 1294, 2245, 1295, 1296, 89, 1109, 1082, 1050,
+ 867, 2039, 866, 89, 1087, 1088, 3320, 2474, 1070, 2093,
+ 2092, 1070, 949, 1071, 1056, 1057, 1071, 2510, 622, 1083,
+ 1076, 599, 2837, 621, 1069, 2949, 1068, 1046, 3473, 621,
+ 613, 2420, 2421, 621, 2240, 2241, 2243, 2242, 167, 2614,
+ 621, 635, 2868, 2866, 1791, 609, 1059, 173, 639, 3474,
+ 1530, 633, 89, 1474, 2557, 149, 130, 152, 137, 129,
+ 2842, 150, 151, 2517, 1700, 2039, 2160, 167, 2122, 2542,
+ 947, 915, 2199, 1099, 1021, 1101, 173, 138, 3279, 2204,
+ 3280, 2830, 2219, 622, 2220, 3503, 2221, 2806, 1103, 2831,
+ 1726, 141, 139, 134, 135, 136, 140, 622, 1085, 1086,
+ 1091, 1089, 1052, 131, 2559, 1108, 1084, 1077, 1701, 2838,
+ 1702, 1090, 142, 1098, 1100, 3038, 589, 2839, 591, 605,
+ 3037, 624, 2222, 623, 595, 1027, 593, 597, 606, 598,
+ 3231, 592, 1026, 603, 2205, 2201, 594, 607, 608, 611,
+ 614, 615, 616, 612, 610, 3034, 601, 625, 2163, 3094,
+ 2052, 958, 987, 949, 1045, 3447, 985, 996, 957, 925,
+ 919, 917, 958, 622, 1611, 995, 158, 2211, 2207, 2209,
+ 2210, 2208, 2212, 2213, 994, 993, 992, 2203, 1286, 1283,
+ 1284, 1285, 1290, 1292, 1289, 1472, 1288, 2037, 2038, 1066,
+ 948, 1072, 1073, 1074, 1075, 158, 1282, 991, 2615, 1286,
+ 1283, 1284, 1285, 1290, 1292, 1289, 647, 1288, 184, 2648,
+ 990, 989, 1096, 1477, 1110, 1111, 1097, 1282, 1049, 2202,
+ 984, 997, 1348, 1327, 2246, 2563, 1102, 622, 3522, 1327,
+ 2757, 3515, 123, 622, 933, 933, 3479, 622, 931, 3327,
+ 970, 585, 2508, 1338, 622, 166, 933, 3435, 1104, 3407,
+ 1095, 2037, 2038, 1632, 969, 1744, 2388, 2390, 3033, 868,
+ 2136, 910, 2560, 2189, 3407, 3433, 1330, 1331, 1332, 1333,
+ 2188, 1740, 1529, 153, 3439, 3440, 1344, 815, 1272, 3406,
+ 2858, 1014, 2755, 2756, 815, 815, 1267, 1080, 2467, 3434,
+ 2776, 2541, 2544, 2182, 3406, 925, 919, 917, 988, 976,
+ 934, 163, 986, 2511, 164, 1731, 1605, 1604, 2576, 900,
+ 955, 1006, 906, 906, 2260, 2950, 1048, 969, 1268, 1269,
+ 1334, 1005, 1603, 2527, 1741, 1601, 2126, 183, 587, 582,
+ 2554, 948, 3448, 2553, 146, 2180, 2476, 147, 159, 1328,
+ 1329, 3316, 3088, 2986, 2968, 171, 1712, 1711, 1713, 1714,
+ 1715, 2997, 2998, 2458, 2423, 626, 3088, 118, 2081, 83,
+ 1631, 1730, 2360, 1857, 3066, 1521, 2127, 159, 1429, 918,
+ 1061, 2807, 2808, 2125, 171, 1327, 619, 944, 2418, 1324,
+ 946, 2733, 893, 113, 2811, 1107, 179, 2566, 2566, 2754,
+ 3341, 620, 2565, 2565, 1093, 999, 2247, 3029, 88, 1878,
+ 975, 2757, 2600, 1498, 1500, 1362, 1358, 2128, 1359, 2531,
+ 1753, 2962, 1065, 1879, 88, 179, 2200, 2124, 1464, 1800,
+ 1737, 88, 2389, 976, 1535, 1058, 1869, 160, 165, 162,
+ 168, 169, 170, 172, 174, 175, 176, 177, 167, 3483,
+ 1478, 1481, 1465, 178, 180, 181, 182, 173, 2500, 3513,
+ 2902, 1055, 3514, 2530, 3512, 114, 160, 165, 162, 168,
+ 169, 170, 172, 174, 175, 176, 177, 1869, 1067, 2277,
+ 88, 2741, 178, 180, 181, 182, 1296, 949, 1480, 941,
+ 3129, 1013, 1484, 2807, 2808, 3004, 943, 942, 877, 1079,
+ 1435, 1436, 1437, 1438, 1439, 3003, 2811, 1297, 1482, 1483,
+ 1081, 2167, 98, 99, 1641, 918, 1465, 1295, 1296, 1727,
+ 1640, 1728, 1297, 1630, 1729, 2177, 2742, 1734, 2175, 1732,
+ 1733, 2172, 1735, 1736, 975, 2172, 1471, 987, 1458, 2179,
+ 969, 972, 973, 985, 933, 947, 3371, 101, 966, 970,
+ 2744, 1094, 1876, 3475, 1051, 976, 1319, 1320, 1322, 1321,
+ 1323, 1324, 889, 1638, 2987, 1064, 3517, 1754, 2739, 965,
+ 2176, 2252, 2253, 2254, 2174, 1297, 158, 1720, 3239, 1608,
+ 1609, 1610, 1827, 3238, 3229, 2755, 2756, 3523, 3079, 1624,
+ 1673, 3372, 2740, 1676, 2581, 1678, 1828, 1325, 1326, 1826,
+ 1297, 1479, 976, 3309, 1499, 1718, 3487, 1497, 1817, 1819,
+ 1820, 1695, 1749, 1617, 1502, 976, 876, 874, 1646, 3063,
+ 1647, 3064, 1649, 1651, 1468, 2746, 1655, 1657, 1659, 1661,
+ 1663, 3078, 1818, 1677, 3011, 3010, 1975, 1526, 1527, 3000,
+ 1972, 1719, 1294, 1636, 1295, 1296, 1685, 1686, 3310, 1974,
+ 1635, 2602, 1691, 1692, 1297, 2784, 975, 1294, 976, 1295,
+ 1296, 979, 969, 813, 1600, 948, 981, 1975, 2772, 1717,
+ 982, 980, 1634, 1634, 3524, 1614, 2482, 2583, 2044, 2045,
+ 1627, 2481, 1615, 1613, 3485, 1497, 1757, 818, 819, 820,
+ 1707, 983, 2754, 1761, 2132, 1763, 1764, 1765, 1766, 1497,
+ 1485, 1721, 1770, 975, 2757, 1012, 1874, 1705, 3438, 1009,
+ 1294, 1704, 1295, 1296, 1782, 1875, 975, 1703, 1755, 1756,
+ 1693, 1681, 969, 972, 973, 1687, 933, 1918, 1684, 3476,
+ 966, 970, 1760, 1683, 1682, 1294, 1653, 1295, 1296, 1767,
+ 1768, 1769, 1745, 2834, 2593, 2592, 2591, 2585, 159, 2589,
+ 627, 2584, 3437, 2582, 1706, 171, 122, 1271, 2587, 975,
+ 2994, 627, 2465, 627, 979, 969, 1606, 2586, 952, 981,
+ 951, 1530, 950, 982, 980, 2148, 2147, 3336, 1054, 3335,
+ 1060, 2146, 2145, 1062, 95, 2588, 2590, 3313, 1759, 1294,
+ 3312, 1295, 1296, 2144, 2143, 96, 179, 1315, 1316, 1317,
+ 1318, 1319, 1320, 1322, 1321, 1323, 1324, 1781, 1297, 2409,
+ 3502, 1780, 3463, 1497, 1497, 2743, 1910, 1899, 1900, 1901,
+ 1902, 1912, 1903, 1904, 1905, 1917, 1913, 1906, 1907, 1914,
+ 1915, 1916, 1908, 1909, 1911, 3311, 1274, 160, 165, 162,
+ 168, 169, 170, 172, 174, 175, 176, 177, 2314, 3234,
+ 1497, 97, 3218, 178, 180, 181, 182, 1506, 1293, 1497,
+ 1293, 1497, 1852, 1852, 1853, 1298, 1796, 1850, 1850, 1830,
+ 1297, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840,
+ 1841, 1842, 1843, 1844, 2409, 3400, 2409, 3379, 1872, 39,
+ 2409, 3375, 1873, 1497, 1353, 1317, 1318, 1319, 1320, 1322,
+ 1321, 1323, 1324, 1507, 3217, 1313, 1297, 2575, 3362, 1497,
+ 1497, 1821, 3128, 1823, 1301, 1302, 1303, 1304, 1305, 1306,
+ 1307, 1299, 3126, 1338, 3075, 2875, 1936, 1831, 1314, 1315,
+ 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324, 3329,
+ 1870, 3047, 3326, 1294, 1463, 1295, 1296, 104, 3242, 1497,
+ 1932, 2409, 3230, 104, 1968, 3477, 2961, 1462, 103, 1461,
+ 102, 1464, 3047, 1497, 103, 1297, 102, 2409, 3045, 97,
+ 3008, 2993, 1824, 2843, 1297, 1788, 1789, 2840, 1799, 1997,
+ 1982, 2172, 1497, 79, 1981, 1465, 2775, 1797, 1798, 1313,
+ 2747, 1497, 2966, 1497, 2751, 2774, 1498, 2004, 1980, 2325,
+ 1497, 2750, 2491, 2765, 2764, 1294, 2479, 1295, 1296, 1460,
+ 1829, 2316, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322,
+ 1321, 1323, 1324, 1297, 2761, 2762, 95, 1971, 1497, 3265,
+ 2028, 2405, 1297, 97, 1497, 2752, 1856, 96, 2761, 2760,
+ 2748, 1294, 2194, 1295, 1296, 2749, 97, 1863, 1865, 3417,
+ 1497, 1297, 2433, 1497, 1313, 1297, 2259, 2193, 2021, 3324,
+ 2022, 1880, 1881, 1882, 1883, 2034, 2274, 2016, 2246, 2540,
+ 1985, 1986, 1982, 1596, 2521, 1894, 2049, 1314, 1315, 1316,
+ 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324, 2090, 1784,
+ 1980, 2454, 1497, 2514, 2515, 98, 99, 2409, 2408, 2454,
+ 1294, 1750, 1295, 1296, 2270, 1525, 3264, 3415, 1497, 1294,
+ 1716, 1295, 1296, 2270, 1497, 98, 99, 3226, 1513, 1313,
+ 1855, 1497, 3222, 1542, 1708, 1698, 1694, 1690, 1984, 1689,
+ 2133, 1987, 1988, 1688, 1508, 3413, 1497, 3221, 2027, 2273,
+ 2062, 2425, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322,
+ 1321, 1323, 1324, 1105, 2455, 1599, 1596, 1595, 1294, 3055,
+ 1295, 1296, 2455, 103, 2457, 890, 2085, 1294, 2015, 1295,
+ 1296, 2425, 2246, 2099, 2100, 2101, 2102, 2094, 1297, 2095,
+ 2096, 2097, 2098, 2432, 2066, 2026, 1294, 2003, 1295, 1296,
+ 1294, 2728, 1295, 1296, 2029, 2105, 2106, 2107, 2108, 2698,
+ 2304, 2246, 2088, 1541, 1540, 2161, 1679, 2084, 2518, 2047,
+ 2961, 2115, 2496, 2433, 2173, 2089, 1293, 2071, 2121, 2072,
+ 2070, 2963, 3367, 3340, 2409, 2433, 1297, 2922, 2087, 2086,
+ 923, 2763, 1293, 2671, 1313, 924, 1297, 1309, 2433, 1310,
+ 1930, 1724, 2073, 2961, 2270, 2325, 2301, 2300, 2158, 2172,
+ 1941, 2155, 2131, 1311, 1325, 1326, 1308, 1314, 1315, 1316,
+ 1317, 1318, 1319, 1320, 1322, 1321, 1323, 1324, 2042, 1501,
+ 2006, 2116, 2172, 1758, 2111, 2112, 1297, 1855, 2130, 1801,
+ 1762, 1739, 1751, 1528, 879, 2134, 2142, 960, 959, 89,
+ 3443, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1297, 2166,
+ 3012, 2186, 2169, 3382, 2170, 977, 2116, 2165, 2164, 2168,
+ 3288, 1497, 1297, 1294, 3253, 1295, 1296, 1504, 2005, 2787,
+ 3286, 1497, 978, 2312, 1941, 1297, 3219, 3140, 3028, 2187,
+ 2190, 3025, 1634, 3006, 2191, 2192, 1314, 1315, 1316, 1317,
+ 1318, 1319, 1320, 1322, 1321, 1323, 1324, 1669, 2885, 2884,
+ 2267, 3013, 3014, 3015, 1297, 2488, 1598, 2114, 1297, 1050,
+ 2266, 1294, 89, 1295, 1296, 2232, 2233, 2832, 1297, 2789,
+ 2235, 1294, 2785, 1295, 1296, 2522, 2110, 2104, 2103, 2236,
+ 2197, 3016, 3283, 1497, 1723, 1297, 2256, 2263, 2258, 1629,
+ 2265, 2268, 1625, 1594, 2271, 2836, 2272, 1497, 1670, 1671,
+ 1672, 2279, 3254, 115, 2129, 2281, 2282, 2283, 1982, 3269,
+ 1497, 1294, 1981, 1295, 1296, 2289, 2290, 2291, 2292, 2293,
+ 2294, 2295, 2296, 2297, 2298, 2225, 2264, 2979, 3017, 3018,
+ 3019, 1823, 2487, 1294, 2019, 1295, 1296, 1786, 2937, 1497,
+ 1297, 2262, 2930, 1497, 2257, 3497, 1297, 1294, 3495, 1295,
+ 1296, 2305, 2306, 2307, 2308, 2309, 3470, 2311, 2978, 3348,
+ 1294, 2313, 1295, 1296, 3274, 2318, 2319, 2976, 2320, 2927,
+ 1497, 2323, 2781, 2324, 2780, 1804, 2244, 2327, 2973, 2974,
+ 2488, 2331, 2779, 2698, 2501, 2336, 2337, 2338, 2339, 1294,
+ 1824, 1295, 1296, 1294, 2226, 1295, 1296, 2717, 2350, 1787,
+ 2353, 2354, 2255, 1294, 2716, 1295, 1296, 3344, 2356, 2358,
+ 1811, 1812, 1813, 1814, 3255, 2361, 2362, 2363, 2364, 2365,
+ 1294, 2033, 1295, 1296, 2925, 1497, 2372, 2373, 1505, 2374,
+ 2890, 1497, 2377, 2379, 2028, 2276, 2381, 2438, 2441, 2442,
+ 2443, 2439, 2967, 2440, 2444, 2720, 2393, 2973, 2974, 2718,
+ 2721, 1852, 2394, 2025, 2719, 2722, 1850, 2442, 2443, 1858,
+ 1859, 2676, 1297, 2675, 1861, 2954, 906, 906, 1866, 3308,
+ 2688, 2690, 1871, 2953, 3119, 1294, 3121, 1295, 1296, 2691,
+ 1510, 1294, 2957, 1295, 1296, 1884, 1885, 1886, 1887, 1888,
+ 1889, 1890, 1891, 1892, 1893, 2392, 2685, 1738, 2310, 1919,
+ 1920, 1921, 1922, 1923, 1924, 1926, 3110, 1931, 3109, 1933,
+ 1934, 1935, 865, 1937, 1938, 1939, 2759, 1945, 1946, 1947,
+ 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957,
+ 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967,
+ 1665, 1969, 2429, 1976, 1977, 906, 1509, 906, 906, 906,
+ 906, 906, 2395, 2410, 2397, 2472, 3108, 1989, 1990, 1991,
+ 1992, 1993, 1994, 1995, 1996, 39, 1998, 1999, 2000, 2001,
+ 2002, 2492, 1297, 894, 2447, 2406, 1471, 2449, 2448, 2419,
+ 2382, 895, 1878, 1016, 1015, 2851, 1297, 1666, 1667, 1668,
+ 2487, 1297, 2402, 2569, 2407, 1297, 1879, 1294, 1465, 1295,
+ 1296, 95, 95, 1270, 906, 2528, 2513, 2422, 97, 123,
+ 2046, 1297, 96, 96, 2475, 2477, 2959, 1297, 2050, 97,
+ 2053, 3509, 2452, 1804, 1495, 1491, 2040, 2041, 2284, 1297,
+ 2456, 2044, 2045, 2459, 3422, 2537, 104, 2777, 2229, 1492,
+ 3325, 3249, 2121, 2758, 2466, 2299, 2469, 103, 2490, 102,
+ 2468, 1297, 2079, 2493, 2494, 2446, 2873, 1497, 97, 1297,
+ 2030, 902, 903, 2480, 2023, 2024, 1494, 2218, 1493, 1297,
+ 2380, 1497, 2217, 1297, 2535, 2378, 1497, 2489, 2216, 2357,
+ 1497, 2674, 2215, 2214, 2939, 1297, 799, 2250, 2497, 2673,
+ 2498, 102, 104, 2502, 2503, 2504, 3030, 3296, 3295, 3277,
+ 2534, 2340, 1497, 103, 103, 102, 1297, 1294, 1617, 1295,
+ 1296, 2117, 3127, 2332, 1497, 2604, 2605, 2606, 2607, 2608,
+ 2579, 1294, 3116, 1295, 1296, 3125, 1294, 1297, 1295, 1296,
+ 1294, 3124, 1295, 1296, 2613, 3260, 2523, 2524, 2533, 189,
+ 1297, 3117, 189, 2981, 3026, 632, 1294, 2958, 1295, 1296,
+ 638, 2956, 1294, 2932, 1295, 1296, 1297, 2928, 2790, 2594,
+ 189, 2156, 1612, 901, 1294, 2947, 1295, 1296, 2558, 2900,
+ 2577, 2425, 104, 3098, 2609, 189, 2578, 2405, 3499, 3498,
+ 3498, 2567, 2561, 103, 2568, 2616, 1294, 2302, 1295, 1296,
+ 2896, 2017, 1297, 1522, 1294, 1514, 1295, 1296, 3499, 1297,
+ 638, 189, 638, 3314, 1294, 1297, 1295, 1296, 1294, 2992,
+ 1295, 1296, 2483, 108, 109, 892, 2595, 2597, 3, 1804,
+ 1294, 91, 1295, 1296, 2882, 1, 2206, 2996, 2650, 2655,
+ 1495, 1491, 3432, 600, 2007, 2223, 2224, 1469, 2652, 2228,
+ 2881, 1294, 3471, 1295, 1296, 1492, 3428, 1297, 2231, 3429,
+ 1709, 1699, 3058, 1942, 3250, 2234, 2793, 1971, 2162, 1971,
+ 3024, 2707, 1294, 2618, 1295, 1296, 2119, 968, 148, 2624,
+ 1488, 1489, 1494, 2082, 1493, 1294, 2878, 1295, 1296, 2725,
+ 1297, 2237, 2083, 2376, 2634, 2635, 2636, 2637, 2638, 2375,
+ 3395, 1294, 2655, 1295, 1296, 2678, 2710, 112, 2654, 926,
+ 2651, 2695, 2653, 111, 971, 1078, 2157, 3048, 2727, 1749,
+ 2473, 2091, 1547, 2701, 1545, 1546, 1544, 1549, 2701, 2062,
+ 1548, 2859, 2303, 2679, 2901, 2670, 1790, 1294, 2666, 1295,
+ 1296, 2371, 634, 2704, 1294, 2445, 1295, 1296, 628, 2677,
+ 1294, 186, 1295, 1296, 881, 1536, 1515, 2062, 2062, 2062,
+ 2062, 2062, 2914, 1010, 2680, 2692, 2693, 590, 2766, 2195,
+ 596, 1345, 1785, 2672, 2370, 2709, 883, 2062, 882, 2460,
+ 2062, 2729, 2813, 2711, 2730, 921, 2714, 2723, 98, 99,
+ 2712, 2713, 1294, 2715, 1295, 1296, 2278, 2731, 2667, 2668,
+ 2669, 913, 2018, 2396, 920, 3227, 2706, 2285, 2286, 2287,
+ 2288, 2951, 2684, 2792, 2686, 2853, 2412, 2769, 2689, 2682,
+ 3307, 2767, 2768, 2770, 2771, 1294, 3118, 1295, 1296, 1297,
+ 2596, 3380, 2470, 1511, 2921, 2870, 2871, 2872, 1297, 2874,
+ 2876, 2820, 2821, 1297, 2737, 2275, 1867, 1297, 1335, 2791,
+ 2059, 3093, 1353, 2883, 2121, 2812, 2827, 1297, 2887, 2888,
+ 2889, 2891, 2892, 2893, 2894, 1816, 656, 2895, 655, 2897,
+ 2898, 2899, 1297, 653, 2903, 2904, 2905, 2906, 2907, 2908,
+ 2909, 2910, 2911, 2912, 2847, 2398, 2846, 2849, 2850, 2626,
+ 2844, 2628, 2919, 2426, 1300, 2923, 2854, 2924, 2926, 803,
+ 2929, 2931, 2855, 2933, 2934, 2935, 2936, 2639, 2640, 2641,
+ 2642, 2942, 2386, 2369, 1523, 2864, 2437, 2435, 2434, 2227,
+ 2861, 2862, 2368, 2863, 1297, 2067, 2865, 2367, 2867, 2916,
+ 2869, 2366, 2975, 1297, 2971, 3424, 2920, 2061, 2057, 1297,
+ 2404, 2355, 754, 753, 665, 1297, 2964, 2965, 657, 649,
+ 2969, 752, 751, 2989, 2819, 1297, 2349, 3408, 2543, 2833,
+ 2545, 1297, 2471, 2829, 1294, 1278, 1295, 1296, 1487, 1513,
+ 643, 2451, 939, 1294, 2856, 1295, 1296, 3331, 1294, 2945,
+ 1295, 1296, 1294, 1297, 1295, 1296, 2249, 2940, 2941, 2879,
+ 1486, 1297, 1294, 1897, 1295, 1296, 1898, 3338, 2801, 3042,
+ 2948, 2782, 2519, 2955, 2149, 65, 43, 1294, 2348, 1295,
+ 1296, 1297, 2970, 2960, 3302, 3368, 2943, 2347, 1297, 750,
+ 747, 3095, 1297, 2346, 3096, 2977, 3097, 2646, 2647, 2345,
+ 3351, 2984, 2985, 3352, 2980, 1297, 2982, 746, 3353, 2344,
+ 1925, 2499, 1275, 3445, 1792, 2343, 1297, 90, 2990, 3031,
+ 3032, 1297, 2062, 2820, 2821, 3046, 2983, 2991, 34, 1294,
+ 33, 1295, 1296, 32, 31, 30, 25, 2342, 1294, 24,
+ 1295, 1296, 3052, 3053, 1294, 2341, 1295, 1296, 23, 189,
+ 1294, 189, 1295, 1296, 189, 1297, 22, 3001, 3002, 3007,
+ 1294, 3009, 1295, 1296, 21, 2335, 1294, 3065, 1295, 1296,
+ 3069, 27, 2334, 1297, 20, 19, 2333, 2548, 2549, 2550,
+ 2551, 2552, 18, 2804, 638, 3466, 638, 638, 1294, 2330,
+ 1295, 1296, 3508, 117, 52, 49, 1294, 3080, 1295, 1296,
+ 2329, 1804, 2562, 47, 125, 2328, 638, 189, 3054, 3035,
+ 124, 50, 3084, 3039, 3040, 3041, 1294, 46, 1295, 1296,
+ 1053, 44, 2570, 1294, 3092, 1295, 1296, 1294, 29, 1295,
+ 1296, 28, 3099, 17, 1340, 16, 3074, 15, 14, 2326,
+ 1294, 13, 1295, 1296, 12, 11, 7, 6, 2574, 37,
+ 36, 1294, 35, 1295, 1296, 1795, 1294, 2322, 1295, 1296,
+ 26, 3070, 2438, 2441, 2442, 2443, 2439, 906, 2440, 2444,
+ 2598, 2599, 4, 1297, 2601, 3091, 2506, 2603, 2151, 3113,
+ 0, 0, 0, 0, 0, 0, 1297, 0, 0, 0,
+ 1294, 0, 1295, 1296, 1297, 1852, 3142, 2610, 2611, 2612,
+ 1850, 0, 0, 0, 0, 0, 0, 0, 1294, 2617,
+ 1295, 1296, 2619, 2620, 2621, 0, 0, 0, 2622, 2623,
+ 3123, 3114, 1945, 2625, 0, 3138, 2627, 2701, 3122, 2629,
+ 2630, 2631, 2632, 3132, 3136, 3130, 3134, 2633, 1945, 1945,
+ 1945, 1945, 1945, 0, 1297, 0, 0, 0, 2704, 0,
+ 0, 0, 2704, 3241, 3143, 3144, 906, 2321, 3228, 39,
+ 3146, 0, 3248, 2656, 2657, 2658, 2659, 2660, 2661, 0,
+ 2317, 1297, 2662, 2663, 0, 2664, 1340, 2665, 2315, 0,
+ 0, 0, 3258, 3259, 3224, 3261, 0, 3262, 3263, 0,
+ 3225, 0, 3266, 3267, 3268, 0, 3270, 3273, 3271, 0,
+ 3223, 3272, 3240, 0, 1852, 3275, 3252, 3245, 1294, 1850,
+ 1295, 1296, 3282, 3284, 3285, 3287, 3289, 3290, 3292, 3232,
+ 3244, 1294, 2696, 1295, 1296, 0, 0, 0, 2280, 1294,
+ 0, 1295, 1296, 189, 0, 0, 0, 638, 638, 0,
+ 0, 0, 3235, 3236, 3237, 0, 0, 2726, 3276, 0,
+ 0, 0, 0, 189, 0, 2269, 3322, 3278, 0, 0,
+ 0, 3281, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3318, 0, 3300, 638, 3297, 3298, 189, 2778, 3299, 1294,
+ 0, 1295, 1296, 0, 0, 0, 0, 0, 638, 0,
+ 0, 0, 0, 0, 189, 3317, 0, 0, 3321, 0,
+ 3315, 0, 0, 2816, 0, 0, 1294, 2788, 1295, 1296,
+ 0, 0, 0, 0, 0, 0, 0, 2828, 2704, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3082, 0, 0,
+ 0, 638, 0, 0, 0, 0, 2845, 0, 0, 2848,
+ 0, 0, 0, 0, 1340, 0, 0, 0, 0, 3323,
+ 638, 638, 0, 638, 0, 638, 638, 0, 638, 638,
+ 638, 638, 638, 638, 0, 0, 0, 0, 0, 0,
+ 0, 1340, 0, 0, 1340, 638, 1340, 189, 0, 0,
+ 0, 0, 3347, 3342, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3334, 189, 3337, 2877,
0, 0, 3363, 0, 0, 0, 0, 0, 3364, 3365,
- 0, 0, 0, 0, 0, 0, 2744, 0, 0, 183,
- 0, 0, 0, 0, 3330, 0, 0, 0, 0, 3357,
- 3376, 0, 3358, 0, 2739, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 3366, 2755, 2756, 39, 0, 0, 3401, 3402, 2740, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 3373, 0,
- 3411, 3413, 3415, 3381, 3408, 3409, 0, 3383, 3386, 3394,
- 3391, 3388, 3387, 3385, 3390, 3389, 2701, 0, 0, 0,
- 0, 2746, 0, 3252, 3396, 3443, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 3419, 0, 0, 0, 3345,
- 0, 0, 0, 3422, 3440, 3430, 3435, 0, 0, 0,
- 0, 0, 0, 1340, 0, 3408, 3409, 3449, 0, 39,
- 167, 3447, 3359, 0, 3460, 3360, 1348, 3361, 0, 173,
- 0, 0, 0, 3378, 0, 0, 0, 0, 3458, 0,
- 3463, 0, 0, 0, 1804, 0, 0, 0, 2754, 0,
- 0, 0, 0, 0, 0, 0, 3481, 3483, 3485, 0,
- 2757, 3478, 3476, 0, 0, 0, 0, 0, 0, 3487,
- 189, 0, 0, 3491, 638, 638, 3486, 3493, 0, 0,
- 0, 0, 0, 1852, 0, 1850, 0, 3504, 3490, 0,
- 189, 3408, 3409, 3501, 3489, 0, 3507, 0, 3306, 0,
- 0, 0, 3515, 3516, 3517, 3513, 0, 0, 0, 0,
- 638, 0, 0, 189, 0, 0, 0, 0, 0, 0,
- 0, 3441, 0, 0, 3523, 638, 3524, 3525, 3272, 0,
- 0, 189, 0, 0, 0, 0, 0, 1852, 0, 1850,
- 0, 0, 3522, 0, 0, 0, 0, 0, 158, 0,
- 0, 3455, 0, 3456, 0, 3457, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 638, 0,
- 0, 2743, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1340, 0, 0, 0, 0, 0, 638, 638, 0,
- 638, 0, 638, 638, 0, 638, 638, 638, 638, 638,
- 638, 0, 0, 0, 0, 0, 0, 0, 1340, 0,
- 0, 1340, 638, 1340, 189, 0, 184, 0, 0, 0,
- 0, 3502, 0, 3503, 0, 0, 0, 1616, 0, 0,
- 0, 0, 0, 0, 189, 0, 0, 0, 0, 0,
- 123, 0, 145, 0, 0, 0, 0, 638, 0, 189,
- 3346, 0, 0, 166, 0, 0, 0, 0, 3356, 0,
- 0, 0, 0, 638, 0, 189, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 2046,
- 0, 189, 0, 0, 156, 0, 0, 2050, 189, 2053,
- 144, 0, 1804, 0, 0, 0, 0, 189, 189, 189,
- 189, 189, 189, 189, 189, 189, 638, 0, 0, 163,
- 0, 0, 164, 0, 0, 0, 0, 0, 0, 0,
- 159, 0, 0, 0, 0, 0, 0, 171, 0, 0,
- 0, 1620, 1621, 155, 154, 183, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 2747, 0, 0, 0,
- 2751, 0, 0, 0, 0, 0, 0, 2750, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 179, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2752, 0, 0, 0, 0, 2748, 0, 0, 814,
- 0, 2749, 81, 0, 0, 0, 0, 0, 0, 160,
- 165, 162, 168, 169, 170, 172, 174, 175, 176, 177,
- 0, 0, 0, 0, 0, 178, 180, 181, 182, 0,
- 0, 0, 0, 0, 149, 1622, 152, 0, 1619, 0,
- 150, 151, 0, 0, 0, 0, 167, 0, 0, 0,
- 0, 0, 0, 0, 0, 173, 0, 0, 0, 3462,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1564,
- 638, 638, 0, 0, 0, 0, 0, 0, 638, 878,
- 0, 81, 0, 189, 0, 0, 0, 0, 1804, 0,
- 0, 0, 0, 0, 0, 2206, 0, 0, 0, 0,
- 878, 0, 0, 0, 2223, 2224, 0, 0, 2228, 0,
- 0, 0, 0, 0, 0, 938, 0, 2231, 0, 0,
- 0, 0, 0, 0, 2234, 0, 0, 0, 0, 0,
- 0, 638, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1340, 0, 0, 0, 0, 0, 0, 0, 0,
- 2237, 0, 638, 0, 0, 0, 0, 0, 1340, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 158, 0, 0, 0, 0, 0,
- 0, 0, 0, 638, 638, 0, 0, 0, 0, 0,
+ 638, 0, 189, 2886, 0, 0, 3330, 0, 0, 0,
+ 3339, 0, 0, 0, 0, 0, 638, 0, 189, 0,
+ 3376, 0, 3357, 0, 0, 3358, 0, 0, 0, 0,
+ 0, 0, 0, 39, 189, 0, 0, 0, 2944, 0,
+ 0, 189, 3366, 0, 0, 0, 3402, 3403, 3373, 0,
+ 189, 189, 189, 189, 189, 189, 189, 189, 189, 638,
+ 3412, 3414, 3416, 3409, 0, 3410, 0, 0, 3378, 3383,
+ 2701, 3386, 3394, 3391, 3388, 3387, 3385, 0, 3390, 3252,
+ 3397, 3389, 0, 0, 0, 3444, 3381, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 3420, 0,
+ 0, 0, 0, 3423, 0, 0, 0, 3441, 3431, 39,
+ 3436, 0, 0, 0, 0, 3409, 0, 3410, 3451, 0,
+ 0, 0, 0, 3005, 3449, 3462, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 3306, 0,
+ 1348, 0, 3020, 0, 3460, 3021, 3022, 3023, 3465, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 3484, 3486,
+ 3488, 0, 0, 3478, 0, 0, 0, 0, 3481, 1749,
+ 1852, 3493, 3027, 3480, 0, 1850, 0, 0, 3489, 0,
+ 3492, 3490, 0, 3496, 3494, 0, 0, 0, 0, 0,
+ 3507, 0, 0, 3409, 0, 3410, 3504, 0, 0, 0,
+ 0, 3510, 0, 0, 0, 3051, 3519, 3520, 0, 3518,
+ 3516, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 638, 638, 1852, 3525, 3526, 3527, 3528,
+ 1850, 638, 3272, 0, 0, 0, 189, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3071, 0, 3072, 0, 0, 3073, 0, 0, 3076, 3077,
+ 0, 0, 0, 0, 0, 0, 0, 3081, 0, 0,
+ 0, 0, 0, 0, 638, 0, 0, 3083, 0, 0,
+ 3346, 0, 0, 0, 1340, 0, 0, 0, 3356, 0,
+ 0, 0, 0, 0, 0, 638, 0, 0, 0, 3100,
+ 0, 1340, 3101, 0, 3102, 3103, 0, 3104, 0, 3105,
+ 0, 0, 0, 0, 3106, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 638, 638, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3131,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3139, 0, 0, 3141, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 3145, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1983, 0, 0, 0,
+ 0, 0, 0, 3220, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1552, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1983, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 153, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
- 0, 638, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1565, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 0, 0, 638, 0,
- 0, 0, 0, 146, 0, 0, 147, 0, 189, 0,
- 0, 0, 638, 0, 0, 1983, 189, 0, 189, 0,
- 189, 189, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 638, 159, 0, 0, 0,
- 0, 0, 0, 171, 0, 1578, 1581, 1582, 1583, 1584,
- 1585, 1586, 0, 1587, 1588, 1589, 1590, 1591, 1566, 1567,
- 1568, 1569, 1550, 1551, 1579, 0, 1553, 0, 1554, 1555,
- 1556, 1557, 1558, 1559, 1560, 1561, 1562, 0, 0, 1563,
- 1570, 1571, 1572, 1573, 179, 1574, 1575, 1576, 1577, 0,
- 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 2451, 0, 0, 638, 0, 160, 165, 162, 168, 169,
- 170, 172, 174, 175, 176, 177, 0, 0, 0, 0,
- 0, 178, 180, 181, 182, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 638, 0, 0, 0, 0, 638, 0, 0, 0, 638,
- 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2499, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 189, 0, 0,
- 0, 0, 0, 0, 189, 0, 0, 0, 0, 1580,
- 0, 0, 0, 189, 189, 0, 0, 189, 0, 189,
- 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
- 0, 0, 0, 189, 0, 0, 2548, 2549, 2550, 2551,
- 2552, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 1804, 2562, 0, 0, 638, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2570, 0, 0, 0, 0, 0, 1113, 0, 1113,
- 1113, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1340,
- 0, 1983, 0, 0, 0, 0, 878, 1337, 1342, 1343,
- 0, 1346, 0, 1347, 1349, 1350, 1351, 0, 1354, 1355,
- 1357, 1357, 0, 1357, 1361, 1361, 1363, 1364, 1365, 1366,
- 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376,
- 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386,
- 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396,
- 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406,
- 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416,
- 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426,
- 1427, 0, 0, 0, 0, 1428, 0, 1430, 1431, 1432,
- 1433, 1434, 0, 0, 0, 0, 0, 0, 0, 0,
- 1361, 1361, 1361, 1361, 1361, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1440, 1441, 1442, 1443, 1444, 1445,
- 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 189, 0, 0, 0, 0, 1467, 0, 0, 189, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 638,
- 0, 0, 0, 184, 0, 0, 0, 0, 0, 0,
- 638, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 123, 0, 145,
- 0, 0, 0, 0, 189, 0, 2778, 0, 1473, 189,
- 166, 0, 0, 0, 878, 0, 0, 0, 878, 0,
- 0, 0, 0, 0, 878, 0, 0, 0, 0, 0,
- 0, 0, 2816, 0, 0, 0, 0, 0, 0, 0,
- 0, 156, 0, 0, 0, 0, 2828, 144, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2845, 163, 0, 2848, 164,
- 0, 0, 0, 638, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 0, 0, 0, 189, 0, 132, 133,
- 155, 154, 183, 0, 0, 0, 0, 0, 0, 0,
- 638, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 0, 0, 0, 0, 0, 0, 638, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1340, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 189, 189, 189, 189,
+ 0, 0, 0, 0, 638, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 189, 0, 0, 0, 0, 0, 0, 2944, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 189, 149, 130, 152, 137, 129, 0, 150, 151, 0,
- 0, 0, 0, 167, 0, 0, 0, 0, 0, 0,
- 0, 638, 173, 138, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1564, 141, 139, 134,
- 135, 136, 140, 0, 0, 0, 0, 0, 0, 131,
- 0, 0, 1113, 0, 0, 0, 0, 0, 142, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 189, 0,
+ 0, 638, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 189, 0, 0, 0, 638, 0, 0, 1983, 189,
+ 0, 189, 0, 189, 189, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 638, 0,
- 0, 0, 3005, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 3020, 0, 0, 3021, 3022, 3023, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 638, 0, 0, 0,
- 0, 0, 0, 0, 638, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3305, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 158, 0, 0, 0, 638, 0, 0, 0, 0,
- 798, 0, 0, 0, 0, 0, 0, 0, 0, 189,
+ 0, 0, 0, 0, 3164, 3166, 3165, 3183, 3184, 3185,
+ 3186, 3187, 3188, 3189, 704, 0, 0, 0, 798, 0,
0, 0, 0, 638, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 1552, 1340, 0, 0, 638, 638, 1340, 189, 189, 189,
- 189, 189, 0, 0, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 0, 0, 189, 0, 189, 617, 0,
- 189, 189, 189, 0, 637, 0, 1113, 1113, 0, 153,
- 0, 0, 0, 0, 0, 81, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 0, 0, 0, 0,
- 0, 0, 0, 1565, 637, 0, 637, 0, 638, 0,
- 0, 1340, 0, 0, 0, 0, 638, 0, 0, 0,
- 146, 189, 0, 147, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 189, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 159, 189, 0, 0, 189, 0, 0,
- 171, 0, 1578, 1581, 1582, 1583, 1584, 1585, 1586, 0,
- 1587, 1588, 1589, 1590, 1591, 1566, 1567, 1568, 1569, 1550,
- 1551, 1579, 0, 1553, 0, 1554, 1555, 1556, 1557, 1558,
- 1559, 1560, 1561, 1562, 0, 0, 1563, 1570, 1571, 1572,
- 1573, 179, 1574, 1575, 1576, 1577, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 638, 0, 0, 0, 0, 0, 638, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 638, 0, 0,
- 0, 0, 160, 165, 162, 168, 169, 170, 172, 174,
- 175, 176, 177, 0, 0, 0, 0, 0, 178, 180,
- 181, 182, 80, 41, 42, 82, 189, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 617, 0, 0, 0,
+ 0, 0, 637, 0, 0, 0, 0, 0, 0, 0,
+ 3343, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 638, 0, 0, 0, 0, 638, 0,
+ 0, 0, 638, 638, 0, 0, 0, 3345, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 637, 0, 637, 0, 0, 0, 0, 0,
+ 3359, 0, 0, 3360, 0, 3361, 0, 0, 0, 0,
+ 189, 0, 0, 0, 0, 0, 0, 189, 0, 0,
+ 0, 0, 0, 0, 0, 0, 189, 189, 0, 0,
+ 189, 0, 189, 0, 0, 0, 0, 0, 0, 189,
+ 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 86, 0, 0, 0, 45, 71, 72, 0,
- 69, 73, 0, 0, 0, 0, 0, 0, 0, 70,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 2060, 0, 0, 0,
- 0, 0, 189, 0, 0, 0, 0, 0, 58, 0,
+ 0, 0, 189, 0, 0, 0, 0, 638, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 89, 0, 0, 0, 0, 89, 1580, 0, 816, 0,
- 0, 189, 804, 817, 818, 819, 820, 805, 0, 0,
- 806, 807, 0, 808, 0, 0, 0, 0, 0, 0,
- 189, 0, 0, 189, 189, 189, 0, 813, 821, 822,
- 0, 0, 0, 638, 638, 0, 938, 0, 0, 0,
+ 0, 0, 3170, 0, 0, 0, 0, 0, 0, 3442,
+ 0, 0, 0, 0, 0, 0, 0, 3178, 3179, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 3343,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 938,
- 0, 0, 0, 0, 2822, 2823, 0, 0, 0, 0,
- 638, 638, 638, 638, 0, 0, 823, 824, 825, 826,
- 827, 828, 829, 830, 831, 832, 833, 834, 835, 836,
- 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 854, 855, 856,
- 857, 858, 859, 860, 861, 862, 863, 864, 48, 51,
- 54, 53, 56, 0, 68, 0, 0, 77, 74, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 3457, 0, 3458, 0, 3459, 0, 0, 0, 0, 0,
+ 0, 0, 1340, 0, 1983, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 57, 85, 84, 0, 0, 66, 67, 55, 2824, 0,
- 0, 0, 0, 75, 76, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 189, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 770,
+ 0, 683, 774, 685, 771, 772, 0, 681, 684, 773,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1340, 0, 0, 0, 0,
- 638, 0, 638, 0, 59, 60, 0, 61, 62, 63,
- 64, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2825, 2826, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2251, 0, 0, 637, 1266,
- 637, 637, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 3505, 0, 3506, 702, 703, 3163, 3167, 3168,
+ 3169, 3180, 3181, 3182, 3190, 3192, 735, 3191, 3193, 3194,
+ 3195, 3198, 3199, 3200, 3201, 3196, 3197, 3202, 3147, 3151,
+ 3148, 3149, 3150, 3162, 3152, 3153, 3154, 3155, 3156, 3157,
+ 3158, 3159, 3160, 3161, 3203, 3204, 3205, 3206, 3207, 3208,
+ 3173, 3177, 3176, 3174, 3175, 3171, 3172, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 775,
+ 0, 776, 0, 0, 780, 816, 0, 1975, 782, 781,
+ 817, 783, 749, 748, 0, 3464, 777, 778, 0, 779,
+ 1851, 0, 0, 189, 0, 1564, 0, 0, 0, 0,
+ 0, 189, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 638, 0, 0, 0, 0, 0, 0, 0,
- 637, 0, 0, 0, 0, 189, 0, 0, 638, 0,
- 0, 0, 0, 0, 2261, 0, 0, 0, 0, 0,
- 0, 638, 0, 0, 0, 0, 0, 0, 1339, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 770, 0, 0, 774, 0, 771, 772, 0, 0,
- 0, 773, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 83, 0, 0, 0,
- 0, 638, 0, 0, 0, 638, 638, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 638, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 189, 0, 0,
+ 0, 0, 189, 0, 3209, 3210, 3211, 3212, 3213, 3214,
+ 3215, 3216, 0, 823, 824, 825, 826, 827, 828, 829,
+ 830, 831, 832, 833, 834, 835, 836, 837, 838, 839,
+ 840, 841, 842, 843, 844, 845, 846, 847, 848, 849,
+ 850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
+ 860, 861, 862, 863, 864, 0, 0, 0, 0, 0,
+ 184, 0, 0, 0, 0, 0, 638, 0, 0, 0,
+ 0, 2512, 189, 0, 0, 0, 0, 0, 0, 189,
+ 0, 0, 0, 0, 123, 0, 145, 0, 0, 1552,
+ 0, 0, 0, 638, 0, 0, 0, 166, 0, 0,
+ 638, 0, 0, 0, 0, 0, 0, 0, 0, 638,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1340, 0, 0, 156, 0,
+ 0, 0, 0, 0, 144, 0, 0, 0, 189, 189,
+ 189, 189, 189, 0, 0, 0, 637, 1266, 637, 637,
+ 0, 0, 0, 163, 0, 0, 164, 0, 0, 0,
+ 0, 0, 189, 189, 0, 0, 0, 0, 637, 0,
+ 0, 0, 1565, 0, 0, 1620, 1621, 155, 154, 183,
+ 0, 0, 0, 189, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1339, 0, 0, 0,
0, 0, 0, 0, 638, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 88, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2359, 0, 0, 0, 0, 755, 0, 0, 0, 0,
+ 0, 1578, 1581, 1582, 1583, 1584, 1585, 1586, 0, 1587,
+ 1588, 1589, 1590, 1591, 1566, 1567, 1568, 1569, 1550, 1551,
+ 1579, 638, 1553, 0, 1554, 1555, 1556, 1557, 1558, 1559,
+ 1560, 1561, 1562, 0, 0, 1563, 1570, 1571, 1572, 1573,
+ 0, 1574, 1575, 1576, 1577, 0, 0, 0, 149, 1622,
+ 152, 0, 1619, 0, 150, 151, 0, 0, 0, 638,
+ 167, 0, 0, 0, 0, 0, 0, 638, 0, 173,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1339, 0, 2391, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 638, 0, 0, 0, 0, 0, 0,
- 878, 0, 0, 0, 0, 0, 0, 0, 0, 189,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 636,
- 0, 2430, 2431, 0, 0, 0, 0, 638, 189, 0,
- 2060, 0, 0, 878, 2450, 0, 0, 0, 0, 0,
- 0, 637, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 930,
- 0, 937, 0, 0, 0, 0, 0, 637, 638, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1340, 0,
- 638, 0, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1593, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1602, 0, 638, 638, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 637, 638, 1628, 0, 0,
- 0, 0, 0, 2526, 0, 1637, 0, 0, 1339, 1639,
- 189, 638, 1642, 1643, 637, 637, 0, 637, 0, 637,
- 637, 0, 637, 637, 637, 637, 637, 637, 0, 0,
- 0, 0, 0, 0, 0, 1339, 1674, 1675, 1339, 637,
- 1339, 0, 1680, 0, 0, 0, 0, 0, 89, 0,
- 0, 816, 0, 0, 638, 804, 817, 818, 819, 820,
- 805, 0, 0, 806, 807, 0, 808, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 638, 0, 0, 0,
- 813, 821, 822, 0, 0, 0, 0, 1742, 0, 0,
- 637, 0, 0, 638, 0, 638, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 2822, 2823, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 823,
- 824, 825, 826, 827, 828, 829, 830, 831, 832, 833,
- 834, 835, 836, 837, 838, 839, 840, 841, 842, 843,
- 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
- 854, 855, 856, 857, 858, 859, 860, 861, 862, 863,
- 864, 0, 2643, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 816, 0,
- 1975, 2824, 0, 817, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1851, 0, 2060, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 2705, 0,
- 81, 0, 0, 2060, 2060, 2060, 2060, 2060, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2060, 0, 0, 2060, 0, 0, 0,
- 0, 0, 0, 2825, 2826, 0, 0, 637, 637, 0,
- 0, 0, 0, 0, 0, 637, 823, 824, 825, 826,
- 827, 828, 829, 830, 831, 832, 833, 834, 835, 836,
- 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 854, 855, 856,
- 857, 858, 859, 860, 861, 862, 863, 864, 0, 2810,
- 0, 0, 0, 0, 0, 0, 0, 0, 637, 2818,
+ 0, 0, 0, 0, 0, 0, 0, 0, 638, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1339, 0,
- 0, 0, 0, 816, 0, 0, 0, 1860, 817, 637,
- 0, 0, 0, 0, 0, 1339, 0, 0, 1851, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 637, 637, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1115, 0, 1115, 1115, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1273, 0, 0, 0, 0,
- 637, 823, 824, 825, 826, 827, 828, 829, 830, 831,
- 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
- 842, 843, 844, 845, 846, 847, 848, 849, 850, 851,
- 852, 853, 854, 855, 856, 857, 858, 859, 860, 861,
- 862, 863, 864, 0, 0, 0, 0, 0, 637, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 189, 0, 0, 0, 638, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 637,
- 0, 0, 637, 0, 0, 0, 0, 0, 2060, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2988, 637, 0, 0, 0, 0, 0, 0, 0,
+ 638, 0, 0, 0, 1340, 0, 0, 638, 638, 1340,
+ 189, 189, 189, 189, 189, 0, 0, 0, 0, 0,
+ 0, 0, 189, 0, 0, 0, 0, 0, 189, 0,
+ 189, 0, 0, 189, 189, 189, 0, 0, 0, 637,
+ 637, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1580, 0, 0, 158, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 637, 0, 0, 189, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 637, 638, 0, 0, 1340, 0, 0, 0, 0, 638,
+ 1593, 0, 0, 0, 189, 0, 0, 0, 0, 0,
+ 1602, 0, 0, 0, 0, 0, 0, 0, 189, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 637, 0, 1628, 153, 189, 0, 0,
+ 189, 0, 0, 1637, 0, 0, 1339, 1639, 0, 0,
+ 1642, 1643, 637, 637, 0, 637, 0, 637, 637, 0,
+ 637, 637, 637, 637, 637, 637, 0, 0, 0, 0,
+ 0, 0, 0, 1339, 1674, 1675, 1339, 637, 1339, 0,
+ 1680, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 146, 0, 0,
+ 147, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 637, 0, 0, 0, 0, 0, 0, 0,
+ 638, 0, 0, 0, 0, 1742, 0, 0, 637, 0,
+ 159, 0, 0, 0, 0, 0, 0, 171, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 189,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 637, 0, 0, 0, 0, 0, 0, 179, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 0, 0, 2139, 2140, 2141, 0, 0, 0,
+ 0, 0, 0, 0, 0, 189, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 160,
+ 165, 162, 168, 169, 170, 172, 174, 175, 176, 177,
+ 0, 0, 0, 0, 189, 178, 180, 181, 182, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 184,
+ 0, 0, 0, 189, 0, 0, 189, 189, 189, 0,
+ 1616, 0, 0, 0, 0, 0, 638, 638, 0, 0,
+ 0, 0, 0, 123, 814, 145, 0, 81, 0, 0,
+ 0, 0, 0, 0, 0, 0, 166, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 638, 638, 638, 638, 156, 0, 0,
+ 0, 0, 0, 144, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1564, 0, 0, 637, 637, 0, 0, 0,
+ 0, 0, 163, 637, 0, 164, 0, 0, 0, 0,
+ 0, 0, 0, 0, 878, 0, 81, 0, 0, 0,
+ 0, 0, 0, 0, 1620, 1621, 155, 154, 183, 0,
+ 0, 0, 0, 0, 0, 878, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 938, 0, 0, 0, 0, 0, 637, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1339, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1860, 0, 637, 0, 189,
+ 0, 0, 0, 1339, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1340, 0,
+ 0, 0, 0, 638, 0, 638, 0, 0, 637, 637,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 0, 0, 0, 0,
- 637, 1637, 0, 0, 1637, 0, 1637, 0, 0, 0,
- 0, 0, 2171, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1475, 1476, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 149, 1622, 152,
+ 0, 1619, 0, 150, 151, 0, 1552, 0, 0, 167,
+ 0, 0, 0, 0, 0, 0, 0, 0, 173, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 637, 0,
+ 0, 0, 0, 0, 0, 638, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 189, 0,
+ 0, 638, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 638, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 637, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1565,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 637, 0, 0, 0, 637, 637, 0, 0,
- 0, 0, 1519, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1537, 0, 0,
+ 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 638, 0, 0, 637, 638, 638,
+ 637, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 158, 0, 0,
+ 637, 0, 0, 0, 0, 0, 0, 638, 1578, 1581,
+ 1582, 1583, 1584, 1585, 1586, 0, 1587, 1588, 1589, 1590,
+ 1591, 1566, 1567, 1568, 1569, 1550, 1551, 1579, 0, 1553,
+ 0, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562,
+ 0, 0, 1563, 1570, 1571, 1572, 1573, 0, 1574, 1575,
+ 1576, 1577, 0, 0, 0, 637, 0, 0, 0, 0,
+ 0, 0, 2139, 2140, 2141, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 153, 0, 0, 0, 0,
+ 0, 0, 637, 0, 0, 0, 638, 0, 637, 1637,
+ 0, 0, 1637, 0, 1637, 0, 0, 0, 0, 0,
+ 2171, 0, 189, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 638, 189, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 637, 146, 0, 0, 147,
+ 637, 0, 0, 0, 637, 637, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 159,
+ 0, 638, 0, 0, 0, 0, 171, 0, 0, 0,
+ 0, 1340, 0, 638, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1580, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 638, 1983, 0,
+ 0, 0, 0, 0, 0, 0, 0, 179, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 638,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 189, 638, 0, 0, 0, 0, 637,
+ 0, 0, 0, 0, 0, 0, 0, 0, 160, 165,
+ 162, 168, 169, 170, 172, 174, 175, 176, 177, 0,
+ 0, 0, 0, 0, 178, 180, 181, 182, 0, 0,
+ 0, 0, 1113, 0, 1113, 1113, 0, 0, 638, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 3107, 0,
- 3111, 3112, 0, 0, 0, 0, 0, 0, 0, 0,
- 930, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2705, 0, 81, 0, 2705, 0, 1645,
- 1645, 0, 1645, 0, 1645, 1645, 0, 1654, 1645, 1645,
- 1645, 1645, 1645, 0, 0, 0, 0, 0, 0, 0,
- 0, 637, 0, 0, 930, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 638, 0, 0, 0, 1339, 189, 637, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 638, 0,
+ 638, 878, 1337, 1342, 1343, 0, 1346, 0, 1347, 1349,
+ 1350, 1351, 0, 1354, 1355, 1357, 1357, 0, 1357, 1361,
+ 1361, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371,
+ 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381,
+ 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391,
+ 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401,
+ 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411,
+ 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421,
+ 1422, 1423, 1424, 1425, 1426, 1427, 0, 0, 0, 0,
+ 1428, 0, 1430, 1431, 1432, 1433, 1434, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1361, 1361, 1361, 1361, 1361,
+ 0, 0, 0, 0, 0, 80, 41, 42, 82, 1440,
+ 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450,
+ 1451, 1452, 1453, 0, 0, 86, 0, 0, 0, 45,
+ 71, 72, 0, 69, 73, 0, 0, 0, 0, 0,
+ 1467, 0, 70, 0, 637, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1722,
- 0, 0, 0, 0, 3233, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1746, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1339, 0, 637, 0,
+ 0, 58, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 756, 89, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1473, 0, 0, 0, 0, 0, 878,
+ 2464, 0, 0, 878, 0, 0, 0, 0, 0, 878,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1115, 0,
+ 0, 0, 0, 0, 0, 187, 0, 0, 586, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 637, 0,
+ 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 888, 0, 0, 0, 637, 0, 0, 0, 0,
+ 0, 0, 637, 0, 0, 0, 1637, 1637, 907, 907,
+ 0, 637, 0, 0, 0, 0, 0, 586, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1339, 2536, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 48, 51, 54, 53, 56, 0, 68, 0, 0,
+ 77, 74, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 57, 85, 84, 0, 0, 66, 67,
+ 55, 0, 0, 0, 0, 0, 75, 76, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2705, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1363, 1364, 1365,
- 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375,
- 1376, 1377, 1378, 1382, 1383, 1384, 1385, 1386, 1387, 1388,
- 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398,
- 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408,
- 1409, 1410, 1411, 1413, 1414, 1415, 1416, 1417, 1418, 1419,
- 1420, 1421, 1422, 1440, 1441, 1442, 1443, 1444, 1445, 1446,
- 1447, 1448, 1449, 1450, 1451, 1452, 1453, 0, 0, 0,
0, 0, 0, 0, 0, 0, 637, 0, 0, 0,
- 0, 3332, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 81,
- 0, 0, 1115, 1115, 0, 0, 0, 0, 0, 0,
- 1793, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 2464, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1846, 0, 0, 0, 0, 0, 0,
- 3377, 0, 0, 0, 0, 81, 0, 0, 0, 756,
- 637, 0, 0, 0, 1862, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 0, 0,
- 0, 0, 0, 0, 637, 1895, 1896, 0, 1637, 1637,
+ 0, 0, 0, 0, 0, 0, 0, 59, 60, 0,
+ 61, 62, 63, 64, 0, 0, 0, 0, 0, 755,
+ 0, 0, 0, 0, 0, 0, 0, 1113, 0, 0,
0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 0, 187, 0, 0, 586, 0, 0, 0, 1339,
- 2536, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 586, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1115, 0, 0, 888, 0,
- 0, 3459, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 907, 907, 0, 0, 0,
- 0, 0, 0, 0, 586, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2020, 0, 0, 0, 0, 637, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 2032, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1519, 0, 0, 1115, 0, 0,
- 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 930, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 637, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 637, 0, 0, 0, 0, 0, 0, 0, 637,
+ 0, 0, 0, 636, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 937, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 637, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 930,
- 637, 0, 0, 0, 0, 937, 0, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 0, 0, 1339, 0,
- 0, 637, 637, 1339, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 637, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 637, 0,
+ 0, 0, 0, 930, 0, 937, 0, 0, 0, 83,
+ 0, 0, 637, 0, 0, 0, 1339, 0, 0, 637,
+ 637, 1339, 89, 0, 0, 816, 0, 0, 0, 804,
+ 817, 818, 819, 820, 805, 0, 0, 806, 807, 0,
+ 808, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 813, 821, 822, 0, 0, 0,
+ 0, 1113, 1113, 0, 0, 0, 0, 0, 0, 0,
+ 81, 88, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2773, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 930, 0, 0, 0, 0, 1846, 0, 0,
- 0, 1846, 1846, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2822, 2823, 637, 0, 0, 1339, 0, 0, 0,
+ 0, 637, 0, 823, 824, 825, 826, 827, 828, 829,
+ 830, 831, 832, 833, 834, 835, 836, 837, 838, 839,
+ 840, 841, 842, 843, 844, 845, 846, 847, 848, 849,
+ 850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
+ 860, 861, 862, 863, 864, 0, 0, 0, 0, 0,
+ 0, 0, 2852, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2773, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 637, 0, 0, 1339, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2824, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 637, 0, 0, 586, 0, 586, 0, 0,
+ 586, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 2852, 0, 2238, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 2825, 2826, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 586, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1341, 2060, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2995, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 770, 0,
+ 0, 774, 0, 771, 772, 0, 0, 0, 773, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 89, 0, 0, 816, 637, 637,
+ 0, 804, 817, 818, 819, 820, 805, 0, 0, 806,
+ 807, 938, 808, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 813, 821, 822, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1115, 637, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 938, 637, 637, 637, 637, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2822, 2823, 0, 0, 0, 0, 0,
+ 0, 0, 1341, 0, 0, 823, 824, 825, 826, 827,
+ 828, 829, 830, 831, 832, 833, 834, 835, 836, 837,
+ 838, 839, 840, 841, 842, 843, 844, 845, 846, 847,
+ 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864, 1115, 0, 1115,
+ 1115, 0, 0, 0, 0, 0, 0, 0, 0, 586,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1273,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 888,
+ 0, 0, 0, 0, 0, 0, 0, 2824, 0, 0,
+ 1339, 0, 0, 0, 0, 637, 0, 637, 0, 0,
+ 0, 0, 586, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 586, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2251, 0, 0, 0, 0, 0, 0, 637, 0, 2825,
+ 2826, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1341, 0, 0, 637, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 637, 0, 0, 2261,
+ 0, 0, 0, 0, 0, 0, 0, 1341, 0, 0,
+ 1341, 0, 1341, 586, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 586, 0, 586, 0, 0, 586, 0, 0,
+ 0, 0, 0, 1696, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 637, 0, 586, 0,
+ 637, 637, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1748, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 637,
+ 586, 0, 0, 0, 0, 0, 0, 586, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1771, 1772, 586, 586,
+ 586, 586, 586, 586, 586, 0, 0, 0, 0, 0,
+ 1475, 1476, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2359, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1519, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 2391, 637, 0,
+ 0, 1537, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 878, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 2995, 0, 0,
+ 0, 0, 637, 0, 0, 0, 2430, 2431, 0, 0,
+ 0, 0, 0, 0, 930, 2060, 0, 0, 878, 2450,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1645, 1645, 0, 1645, 0, 1645, 1645,
+ 0, 1654, 1645, 1645, 1645, 1645, 1645, 0, 0, 0,
+ 0, 0, 0, 637, 0, 0, 0, 0, 930, 0,
+ 0, 0, 0, 1339, 0, 637, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 637,
+ 637, 0, 586, 1722, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1746,
+ 0, 637, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 637, 0, 2526, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 637, 637, 0, 0, 0, 0, 0, 1341, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 2399, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 2414, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 637, 637, 637,
+ 1341, 0, 1115, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 907, 907, 0, 0, 0, 1341, 0, 0,
637, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 637, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 637, 0, 637, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 907, 1748, 907, 907, 907, 907, 907, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2495, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1341,
- 0, 0, 2032, 0, 0, 0, 0, 0, 0, 2520,
- 0, 0, 0, 0, 0, 0, 0, 0, 2525, 0,
- 0, 0, 1339, 0, 0, 0, 0, 637, 0, 637,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1696, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
+ 907, 0, 0, 0, 0, 0, 0, 2643, 0, 0,
+ 0, 0, 0, 0, 888, 0, 1115, 1115, 0, 0,
+ 0, 0, 0, 0, 1793, 0, 0, 586, 0, 0,
+ 0, 0, 0, 0, 1748, 586, 0, 586, 0, 586,
+ 2069, 0, 0, 816, 0, 0, 0, 0, 817, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1851, 0,
+ 2060, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1846, 0, 0,
+ 0, 0, 0, 2705, 0, 81, 0, 0, 2060, 2060,
+ 2060, 2060, 2060, 0, 0, 0, 0, 0, 1862, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2060, 0,
+ 0, 2060, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1895,
+ 1896, 823, 824, 825, 826, 827, 828, 829, 830, 831,
+ 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
+ 842, 843, 844, 845, 846, 847, 848, 849, 850, 851,
+ 852, 853, 854, 855, 856, 857, 858, 859, 860, 861,
+ 862, 863, 864, 0, 2810, 0, 0, 0, 0, 1115,
+ 0, 0, 0, 0, 2818, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 888, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 637,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 586,
- 0, 0, 0, 0, 0, 637, 0, 0, 0, 0,
- 0, 0, 0, 1846, 0, 0, 0, 586, 637, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 2020, 0, 0,
+ 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
+ 0, 0, 0, 586, 0, 0, 0, 0, 0, 0,
+ 0, 0, 586, 586, 2032, 0, 586, 0, 2230, 0,
+ 0, 0, 0, 0, 0, 586, 0, 0, 1519, 0,
+ 0, 1115, 586, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 930, 0, 0, 0, 0, 0, 0, 586, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1846, 0, 0, 0, 0, 0, 0, 1341, 637, 0,
- 0, 0, 637, 637, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1341, 0, 0, 1341, 2644, 1341,
- 586, 637, 0, 0, 0, 0, 1115, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1696, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 1645, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1748, 0, 0, 0, 2681, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 586, 0, 1115,
- 0, 0, 0, 0, 586, 0, 2708, 1645, 0, 0,
- 637, 0, 0, 1771, 1772, 586, 586, 586, 586, 586,
- 586, 586, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 937, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 637, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1341, 0,
+ 1748, 0, 0, 930, 0, 0, 0, 0, 0, 937,
+ 0, 0, 0, 2060, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 2988, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 930, 0, 0, 0,
+ 0, 1846, 0, 0, 0, 1846, 1846, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 930, 0, 0, 0, 0, 637, 0, 0, 2032, 0,
- 0, 0, 0, 0, 0, 1339, 0, 637, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 637, 637, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 637, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 586,
+ 0, 0, 0, 0, 0, 0, 0, 1696, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 637, 0, 0, 0, 0, 0, 0, 0, 2915,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 637, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 637, 0, 637, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 907,
- 907, 0, 0, 0, 1341, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2238, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 586, 0, 0, 0, 0, 586, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3107, 0, 3111, 3112, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1115, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2705, 0,
+ 81, 0, 2705, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 907, 1748,
- 907, 907, 907, 907, 907, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2032, 2032, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 586, 0,
+ 0, 0, 0, 0, 0, 2505, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1696, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 907, 0, 0,
- 0, 0, 3059, 3060, 3061, 3062, 0, 0, 0, 0,
- 0, 888, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 586, 0, 0, 0, 0, 0,
- 0, 1748, 586, 0, 586, 0, 586, 2069, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3233,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1341, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 586, 586, 586, 586, 586, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 586, 586,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 586,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 2399, 0, 0, 0, 0,
+ 0, 0, 0, 907, 0, 0, 2414, 0, 2705, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370,
+ 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1382, 1383,
+ 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393,
+ 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403,
+ 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1413, 1414,
+ 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1440, 1441,
+ 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451,
+ 1452, 1453, 907, 0, 0, 0, 0, 0, 0, 2495,
+ 0, 0, 0, 0, 0, 0, 3332, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 81, 0, 2032, 0, 0, 0,
+ 0, 0, 0, 2520, 0, 0, 0, 0, 586, 0,
+ 0, 0, 2525, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1341, 0, 0, 0, 0, 1341, 586, 586, 586, 586,
+ 586, 0, 0, 0, 0, 0, 0, 0, 2724, 0,
+ 0, 0, 0, 0, 1696, 0, 586, 0, 0, 586,
+ 2732, 1748, 0, 0, 0, 3377, 0, 0, 0, 0,
+ 81, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 3135, 0, 3137, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 586, 0, 0, 1846, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1341, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 586, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 586, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1846, 0, 0, 0, 0, 0,
+ 0, 0, 0, 586, 0, 0, 586, 3461, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 2644, 0, 0, 0, 0, 0, 0, 0,
+ 1115, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 2032, 0, 0, 0, 0, 0,
+ 0, 1645, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2681,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 3243, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1115, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 586, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 0, 0, 0, 0, 0, 0, 586,
- 586, 0, 0, 586, 0, 2230, 0, 0, 0, 0,
- 0, 0, 586, 0, 0, 0, 0, 0, 0, 586,
- 0, 0, 0, 3293, 0, 0, 0, 3293, 3293, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 2032, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1341, 0, 1748, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2032, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 2032,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 3370, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 3374, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1115, 1115, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
- 0, 0, 0, 0, 1696, 0, 0, 0, 3417, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 3425, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 0, 0, 586, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 3370, 0, 0, 0,
+ 2708, 1645, 0, 0, 0, 586, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 2032, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 2915, 0, 3425, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 0, 0, 0,
- 0, 0, 2505, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1341, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 586, 586, 586, 586, 586, 0, 0, 0, 0,
+ 0, 586, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 586, 0, 0, 0,
+ 0, 0, 0, 0, 930, 0, 0, 0, 0, 0,
+ 586, 0, 2032, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 586,
+ 0, 0, 586, 586, 586, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 907, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2915, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 907,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 586, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1696, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 1341, 586, 586, 586, 586, 586, 0, 0,
- 0, 0, 0, 0, 0, 2724, 0, 0, 0, 0,
- 0, 1696, 0, 586, 0, 0, 586, 2732, 1748, 0,
+ 0, 0, 0, 0, 1341, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 586, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1341, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 586, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 586, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2032,
+ 2032, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1696, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 586, 0, 0, 586, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3059, 3060, 3061, 3062,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2494,333 +2410,142 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 586, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3135, 0, 3137, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 586, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1696, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 586, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 2032, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 586, 0, 0, 586,
- 586, 586, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3243, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1115, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1341, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3396, 0, 0, 3293, 0, 0,
+ 0, 3293, 3293, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1696,
+ 2032, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1748, 0, 0, 0, 0, 0, 0, 0, 2032,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2032, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 1696, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1341, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 3370, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3374, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1115, 1115, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 3418, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3426, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1696, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3370, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2032, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 380, 2915, 0, 3426, 1249, 1234, 496, 0, 1177, 1252,
+ 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399, 1162,
+ 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145, 1236,
+ 1195, 1251, 350, 254, 1127, 1151, 413, 1167, 196, 1215,
+ 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
+ 369, 411, 486, 405, 1258, 354, 1201, 0, 476, 384,
+ 0, 0, 0, 1181, 1240, 1189, 1227, 1176, 1214, 1135,
+ 1200, 1253, 1163, 1210, 1254, 309, 237, 311, 195, 396,
+ 477, 273, 0, 0, 0, 0, 3398, 800, 0, 0,
+ 0, 0, 3399, 0, 0, 0, 0, 228, 0, 0,
+ 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
+ 329, 334, 341, 347, 1159, 1207, 1248, 1160, 1209, 252,
+ 307, 259, 251, 501, 1259, 1239, 1124, 1188, 1247, 0,
+ 0, 219, 1250, 1183, 0, 1212, 0, 1265, 1119, 1203,
+ 0, 1122, 1126, 1261, 1243, 1154, 262, 0, 0, 0,
+ 0, 0, 0, 0, 1180, 1190, 1224, 1228, 1174, 0,
+ 0, 0, 0, 0, 0, 0, 1152, 0, 1199, 0,
+ 0, 0, 1131, 1123, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 380, 0, 0, 0, 1249, 1234, 496, 0, 1177,
- 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399,
- 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145,
- 1236, 1195, 1251, 350, 254, 1127, 1151, 413, 1167, 196,
- 1215, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 1258, 354, 1201, 0, 476,
- 384, 0, 0, 0, 1181, 1240, 1189, 1227, 1176, 1214,
- 1135, 1200, 1253, 1163, 1210, 1254, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 1696, 0, 3397, 627, 0,
- 0, 0, 0, 3398, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 586, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 1159, 1207, 1248, 1160, 1209,
- 252, 307, 259, 251, 501, 1259, 1239, 1124, 1188, 1247,
- 0, 0, 219, 1250, 1183, 0, 1212, 0, 1265, 1119,
- 1203, 0, 1122, 1126, 1261, 1243, 1154, 262, 0, 0,
- 0, 0, 0, 0, 0, 1180, 1190, 1224, 1228, 1174,
- 0, 0, 0, 0, 1341, 0, 0, 1152, 0, 1199,
- 0, 0, 0, 1131, 1123, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1178, 0, 0,
- 0, 0, 1134, 0, 1153, 1225, 1696, 1117, 284, 1128,
- 385, 244, 0, 1232, 1242, 1175, 541, 1246, 1173, 1172,
- 1219, 1132, 1238, 1166, 349, 1130, 316, 191, 215, 0,
- 1164, 395, 441, 453, 1237, 1149, 1158, 242, 1156, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 1198,
- 1217, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 1129, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 1144, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 1233, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 1222, 1264, 408, 452, 230, 522, 475,
- 1139, 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256,
- 1257, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 1226, 1133, 0, 1141, 1142, 1235, 1244, 1245, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 1197, 190,
- 211, 352, 1260, 434, 275, 559, 532, 527, 197, 213,
- 1136, 249, 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184,
- 1185, 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223,
- 1230, 1241, 1263, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 1196,
- 1202, 365, 268, 291, 306, 1211, 531, 481, 217, 446,
- 277, 240, 1229, 1231, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 1192, 1220,
- 360, 497, 498, 302, 380, 0, 0, 0, 1249, 1234,
- 496, 0, 1177, 1252, 1146, 1165, 1262, 1168, 1171, 1213,
- 1125, 1191, 399, 1162, 1118, 1150, 1120, 1157, 1121, 1148,
- 1179, 257, 1145, 1236, 1195, 1251, 350, 254, 1127, 1151,
- 413, 1167, 196, 1215, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 1258, 354,
- 1201, 0, 476, 384, 0, 0, 0, 1181, 1240, 1189,
- 1227, 1176, 1214, 1135, 1200, 1253, 1163, 1210, 1254, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 0, 188, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 1159, 1207,
- 1248, 1160, 1209, 252, 307, 259, 251, 501, 1259, 1239,
- 1124, 1188, 1247, 0, 0, 219, 1250, 1183, 0, 1212,
- 0, 1265, 1119, 1203, 0, 1122, 1126, 1261, 1243, 1154,
- 262, 0, 0, 0, 0, 0, 0, 0, 1180, 1190,
- 1224, 1228, 1174, 0, 0, 0, 0, 0, 2733, 0,
- 1152, 0, 1199, 0, 0, 0, 1131, 1123, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1178, 0, 0, 0, 0, 1134, 0, 1153, 1225, 0,
- 1117, 284, 1128, 385, 244, 0, 1232, 1242, 1175, 541,
- 1246, 1173, 1172, 1219, 1132, 1238, 1166, 349, 1130, 316,
- 191, 215, 0, 1164, 395, 441, 453, 1237, 1149, 1158,
- 242, 1156, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 1198, 1217, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 1129,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 1144, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 1233, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 1222, 1264, 408, 452,
- 230, 522, 475, 1139, 1143, 1137, 1204, 1138, 1193, 1194,
- 1140, 1255, 1256, 1257, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 1226, 1133, 0, 1141, 1142, 1235, 1244,
- 1245, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 1197, 190, 211, 352, 1260, 434, 275, 559, 532,
- 527, 197, 213, 1136, 249, 1147, 1155, 0, 1161, 1169,
- 1170, 1182, 1184, 1185, 1186, 1187, 1205, 1206, 1208, 1216,
- 1218, 1221, 1223, 1230, 1241, 1263, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 1196, 1202, 365, 268, 291, 306, 1211, 531,
- 481, 217, 446, 277, 240, 1229, 1231, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 1192, 1220, 360, 497, 498, 302, 380, 0, 0,
- 0, 1249, 1234, 496, 0, 1177, 1252, 1146, 1165, 1262,
- 1168, 1171, 1213, 1125, 1191, 399, 1162, 1118, 1150, 1120,
- 1157, 1121, 1148, 1179, 257, 1145, 1236, 1195, 1251, 350,
- 254, 1127, 1151, 413, 1167, 196, 1215, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 1258, 354, 1201, 0, 476, 384, 0, 0, 0,
- 1181, 1240, 1189, 1227, 1176, 1214, 1135, 1200, 1253, 1163,
- 1210, 1254, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 627, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 1159, 1207, 1248, 1160, 1209, 252, 307, 259, 251,
- 501, 1259, 1239, 1124, 1188, 1247, 0, 0, 219, 1250,
- 1183, 0, 1212, 0, 1265, 1119, 1203, 0, 1122, 1126,
- 1261, 1243, 1154, 262, 0, 0, 0, 0, 0, 0,
- 0, 1180, 1190, 1224, 1228, 1174, 0, 0, 0, 0,
- 0, 2694, 0, 1152, 0, 1199, 0, 0, 0, 1131,
- 1123, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1178, 0, 0, 0, 0, 1134, 0,
- 1153, 1225, 0, 1117, 284, 1128, 385, 244, 0, 1232,
- 1242, 1175, 541, 1246, 1173, 1172, 1219, 1132, 1238, 1166,
- 349, 1130, 316, 191, 215, 0, 1164, 395, 441, 453,
- 1237, 1149, 1158, 242, 1156, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 1198, 1217, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 1129, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 1144, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 1233, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 1222,
- 1264, 408, 452, 230, 522, 475, 1139, 1143, 1137, 1204,
- 1138, 1193, 1194, 1140, 1255, 1256, 1257, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 1226, 1133, 0, 1141,
- 1142, 1235, 1244, 1245, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 1197, 190, 211, 352, 1260, 434,
- 275, 559, 532, 527, 197, 213, 1136, 249, 1147, 1155,
- 0, 1161, 1169, 1170, 1182, 1184, 1185, 1186, 1187, 1205,
- 1206, 1208, 1216, 1218, 1221, 1223, 1230, 1241, 1263, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 1196, 1202, 365, 268, 291,
- 306, 1211, 531, 481, 217, 446, 277, 240, 1229, 1231,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 1192, 1220, 360, 497, 498, 302,
- 380, 0, 0, 0, 1249, 1234, 496, 0, 1177, 1252,
- 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399, 1162,
- 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145, 1236,
- 1195, 1251, 350, 254, 1127, 1151, 413, 1167, 196, 1215,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 1258, 354, 1201, 0, 476, 384,
- 0, 0, 0, 1181, 1240, 1189, 1227, 1176, 1214, 1135,
- 1200, 1253, 1163, 1210, 1254, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 0, 800, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
- 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
- 329, 334, 341, 347, 1159, 1207, 1248, 1160, 1209, 252,
- 307, 259, 251, 501, 1259, 1239, 1124, 1188, 1247, 0,
- 0, 219, 1250, 1183, 0, 1212, 0, 1265, 1119, 1203,
- 0, 1122, 1126, 1261, 1243, 1154, 262, 0, 0, 0,
- 0, 0, 0, 0, 1180, 1190, 1224, 1228, 1174, 0,
- 0, 0, 0, 0, 2048, 0, 1152, 0, 1199, 0,
- 0, 0, 1131, 1123, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1178, 0, 0, 0,
- 0, 1134, 0, 1153, 1225, 0, 1117, 284, 1128, 385,
- 244, 0, 1232, 1242, 1175, 541, 1246, 1173, 1172, 1219,
- 1132, 1238, 1166, 349, 1130, 316, 191, 215, 0, 1164,
- 395, 441, 453, 1237, 1149, 1158, 242, 1156, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 1198, 1217,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
- 243, 561, 218, 536, 210, 1129, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 1144, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 1233, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 1222, 1264, 408, 452, 230, 522, 475, 1139,
- 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256, 1257,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 1226,
- 1133, 0, 1141, 1142, 1235, 1244, 1245, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 1197, 190, 211,
- 352, 1260, 434, 275, 559, 532, 527, 197, 213, 1136,
- 249, 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184, 1185,
- 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223, 1230,
- 1241, 1263, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 1196, 1202,
- 365, 268, 291, 306, 1211, 531, 481, 217, 446, 277,
- 240, 1229, 1231, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 1192, 1220, 360,
- 497, 498, 302, 380, 0, 0, 0, 1249, 1234, 496,
- 0, 1177, 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125,
- 1191, 399, 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179,
- 257, 1145, 1236, 1195, 1251, 350, 254, 1127, 1151, 413,
- 1167, 196, 1215, 466, 241, 361, 358, 504, 269, 260,
- 256, 239, 303, 369, 411, 486, 405, 1258, 354, 1201,
- 0, 476, 384, 0, 0, 0, 1181, 1240, 1189, 1227,
- 1176, 1214, 1135, 1200, 1253, 1163, 1210, 1254, 309, 237,
- 311, 195, 396, 477, 273, 0, 89, 0, 0, 0,
- 627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
- 324, 325, 327, 329, 334, 341, 347, 1159, 1207, 1248,
- 1160, 1209, 252, 307, 259, 251, 501, 1259, 1239, 1124,
- 1188, 1247, 0, 0, 219, 1250, 1183, 0, 1212, 0,
- 1265, 1119, 1203, 0, 1122, 1126, 1261, 1243, 1154, 262,
- 0, 0, 0, 0, 0, 0, 0, 1180, 1190, 1224,
- 1228, 1174, 0, 0, 0, 0, 0, 0, 0, 1152,
- 0, 1199, 0, 0, 0, 1131, 1123, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1178, 0, 0, 0,
+ 0, 1134, 0, 1153, 1225, 0, 1117, 284, 1128, 385,
+ 244, 0, 1232, 1242, 1175, 541, 1246, 1173, 1172, 1219,
+ 1132, 1238, 1166, 349, 1130, 316, 191, 215, 0, 1164,
+ 395, 441, 453, 1237, 1149, 1158, 242, 1156, 451, 409,
+ 520, 223, 271, 438, 415, 449, 422, 274, 1198, 1217,
+ 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
+ 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
+ 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
+ 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
+ 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
+ 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
+ 243, 561, 218, 536, 210, 1129, 535, 391, 505, 514,
+ 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
+ 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
+ 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
+ 1144, 266, 270, 278, 281, 289, 290, 299, 351, 402,
+ 428, 424, 433, 1233, 500, 518, 530, 540, 546, 547,
+ 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
+ 319, 357, 1222, 1264, 408, 452, 230, 522, 475, 1139,
+ 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256, 1257,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
+ 573, 574, 575, 576, 577, 578, 579, 580, 0, 1226,
+ 1133, 0, 1141, 1142, 1235, 1244, 1245, 581, 368, 465,
+ 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
+ 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
+ 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
+ 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
+ 295, 448, 447, 317, 318, 363, 431, 1197, 190, 211,
+ 352, 1260, 434, 275, 559, 532, 527, 197, 213, 1136,
+ 249, 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184, 1185,
+ 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223, 1230,
+ 1241, 1263, 192, 193, 200, 212, 222, 226, 233, 248,
+ 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
+ 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
+ 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
+ 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
+ 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
+ 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
+ 556, 557, 286, 516, 545, 0, 0, 362, 1196, 1202,
+ 365, 268, 291, 306, 1211, 531, 481, 217, 446, 277,
+ 240, 1229, 1231, 202, 236, 220, 246, 261, 264, 310,
+ 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
+ 487, 488, 489, 491, 379, 253, 416, 1192, 1220, 360,
+ 497, 498, 302, 380, 0, 0, 0, 1249, 1234, 496,
+ 0, 1177, 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125,
+ 1191, 399, 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179,
+ 257, 1145, 1236, 1195, 1251, 350, 254, 1127, 1151, 413,
+ 1167, 196, 1215, 466, 241, 361, 358, 504, 269, 260,
+ 256, 239, 303, 369, 411, 486, 405, 1258, 354, 1201,
+ 0, 476, 384, 0, 0, 0, 1181, 1240, 1189, 1227,
+ 1176, 1214, 1135, 1200, 1253, 1163, 1210, 1254, 309, 237,
+ 311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
+ 188, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
+ 324, 325, 327, 329, 334, 341, 347, 1159, 1207, 1248,
+ 1160, 1209, 252, 307, 259, 251, 501, 1259, 1239, 1124,
+ 1188, 1247, 0, 0, 219, 1250, 1183, 0, 1212, 0,
+ 1265, 1119, 1203, 0, 1122, 1126, 1261, 1243, 1154, 262,
+ 0, 0, 0, 0, 0, 0, 0, 1180, 1190, 1224,
+ 1228, 1174, 0, 0, 0, 0, 0, 2733, 0, 1152,
+ 0, 1199, 0, 0, 0, 1131, 1123, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2885,7 +2610,7 @@ var yyAct = [...]int{
0, 1212, 0, 1265, 1119, 1203, 0, 1122, 1126, 1261,
1243, 1154, 262, 0, 0, 0, 0, 0, 0, 0,
1180, 1190, 1224, 1228, 1174, 0, 0, 0, 0, 0,
- 0, 0, 1152, 0, 1199, 0, 0, 0, 1131, 1123,
+ 2694, 0, 1152, 0, 1199, 0, 0, 0, 1131, 1123,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2950,7 +2675,7 @@ var yyAct = [...]int{
219, 1250, 1183, 0, 1212, 0, 1265, 1119, 1203, 0,
1122, 1126, 1261, 1243, 1154, 262, 0, 0, 0, 0,
0, 0, 0, 1180, 1190, 1224, 1228, 1174, 0, 0,
- 0, 0, 0, 0, 0, 1152, 0, 1199, 0, 0,
+ 0, 0, 0, 2048, 0, 1152, 0, 1199, 0, 0,
0, 1131, 1123, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3007,7 +2732,7 @@ var yyAct = [...]int{
239, 303, 369, 411, 486, 405, 1258, 354, 1201, 0,
476, 384, 0, 0, 0, 1181, 1240, 1189, 1227, 1176,
1214, 1135, 1200, 1253, 1163, 1210, 1254, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 188,
+ 195, 396, 477, 273, 0, 89, 0, 0, 0, 627,
0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
325, 327, 329, 334, 341, 347, 1159, 1207, 1248, 1160,
@@ -3064,266 +2789,203 @@ var yyAct = [...]int{
446, 277, 240, 1229, 1231, 202, 236, 220, 246, 261,
264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
231, 464, 487, 488, 489, 491, 379, 253, 416, 1192,
- 1220, 360, 497, 498, 302, 380, 0, 0, 0, 0,
- 0, 496, 0, 679, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 666, 0,
- 0, 0, 257, 671, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 678,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 674, 675, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 89, 0,
- 0, 816, 800, 766, 767, 804, 817, 818, 819, 820,
- 805, 0, 228, 806, 807, 235, 808, 0, 765, 706,
- 708, 707, 725, 726, 727, 728, 729, 730, 731, 704,
- 813, 821, 822, 0, 252, 307, 259, 251, 501, 0,
- 0, 1927, 1928, 1929, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 648, 663, 0, 677, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 660, 661, 0,
- 0, 0, 0, 760, 0, 662, 0, 0, 670, 823,
- 824, 825, 826, 827, 828, 829, 830, 831, 832, 833,
- 834, 835, 836, 837, 838, 839, 840, 841, 842, 843,
- 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
- 854, 855, 856, 857, 858, 859, 860, 861, 862, 863,
- 864, 673, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 759, 0, 0,
- 541, 0, 0, 757, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 810, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
+ 1220, 360, 497, 498, 302, 380, 0, 0, 0, 1249,
+ 1234, 496, 0, 1177, 1252, 1146, 1165, 1262, 1168, 1171,
+ 1213, 1125, 1191, 399, 1162, 1118, 1150, 1120, 1157, 1121,
+ 1148, 1179, 257, 1145, 1236, 1195, 1251, 350, 254, 1127,
+ 1151, 413, 1167, 196, 1215, 466, 241, 361, 358, 504,
+ 269, 260, 256, 239, 303, 369, 411, 486, 405, 1258,
+ 354, 1201, 0, 476, 384, 0, 0, 0, 1181, 1240,
+ 1189, 1227, 1176, 1214, 1135, 1200, 1253, 1163, 1210, 1254,
+ 309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
+ 0, 0, 188, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
+ 344, 343, 324, 325, 327, 329, 334, 341, 347, 1159,
+ 1207, 1248, 1160, 1209, 252, 307, 259, 251, 501, 1259,
+ 1239, 1124, 1188, 1247, 0, 0, 219, 1250, 1183, 0,
+ 1212, 0, 1265, 1119, 1203, 0, 1122, 1126, 1261, 1243,
+ 1154, 262, 0, 0, 0, 0, 0, 0, 0, 1180,
+ 1190, 1224, 1228, 1174, 0, 0, 0, 0, 0, 0,
+ 0, 1152, 0, 1199, 0, 0, 0, 1131, 1123, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1178, 0, 0, 0, 0, 1134, 0, 1153, 1225,
+ 0, 1117, 284, 1128, 385, 244, 0, 1232, 1242, 1175,
+ 541, 1246, 1173, 1172, 1219, 1132, 1238, 1166, 349, 1130,
+ 316, 191, 215, 0, 1164, 395, 441, 453, 1237, 1149,
+ 1158, 242, 1156, 451, 409, 520, 223, 271, 438, 415,
+ 449, 422, 274, 1198, 1217, 450, 356, 506, 432, 517,
542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 811, 812, 243, 561, 712, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 720, 721, 267, 293, 429, 359, 430, 292,
+ 280, 245, 398, 510, 511, 243, 561, 218, 536, 210,
+ 1129, 535, 391, 505, 514, 378, 367, 209, 512, 376,
+ 366, 320, 339, 340, 267, 293, 429, 359, 430, 292,
294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
+ 525, 562, 224, 225, 227, 1144, 266, 270, 278, 281,
+ 289, 290, 299, 351, 402, 428, 424, 433, 1233, 500,
518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 770, 758, 683, 774, 685, 771,
- 772, 680, 681, 684, 773, 563, 564, 565, 566, 567,
+ 555, 554, 390, 297, 474, 319, 357, 1222, 1264, 408,
+ 452, 230, 522, 475, 1139, 1143, 1137, 1204, 1138, 1193,
+ 1194, 1140, 1255, 1256, 1257, 563, 564, 565, 566, 567,
568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 761, 669, 668, 0, 676, 0,
- 702, 703, 705, 709, 710, 711, 722, 723, 724, 732,
- 734, 735, 733, 736, 737, 738, 741, 742, 743, 744,
- 739, 740, 745, 686, 690, 687, 688, 689, 701, 691,
- 692, 693, 694, 695, 696, 697, 698, 699, 700, 784,
- 785, 786, 787, 788, 789, 715, 719, 718, 716, 717,
- 713, 714, 667, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 775, 249, 776, 0, 0, 780,
- 0, 0, 0, 782, 781, 0, 783, 749, 748, 0,
- 0, 777, 778, 0, 779, 0, 0, 192, 193, 200,
+ 578, 579, 580, 0, 1226, 1133, 0, 1141, 1142, 1235,
+ 1244, 1245, 581, 368, 465, 519, 321, 333, 336, 326,
+ 345, 0, 346, 322, 323, 328, 330, 331, 332, 337,
+ 338, 342, 348, 238, 201, 374, 382, 499, 298, 206,
+ 207, 208, 492, 493, 494, 495, 533, 534, 538, 442,
+ 443, 444, 445, 279, 528, 295, 448, 447, 317, 318,
+ 363, 431, 1197, 190, 211, 352, 1260, 434, 275, 559,
+ 532, 527, 197, 213, 1136, 249, 1147, 1155, 0, 1161,
+ 1169, 1170, 1182, 1184, 1185, 1186, 1187, 1205, 1206, 1208,
+ 1216, 1218, 1221, 1223, 1230, 1241, 1263, 192, 193, 200,
212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 790,
- 791, 792, 793, 794, 795, 796, 797, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 815, 0, 202, 236,
+ 479, 480, 484, 507, 509, 521, 539, 544, 460, 287,
+ 288, 426, 427, 300, 301, 556, 557, 286, 516, 545,
+ 0, 0, 362, 1196, 1202, 365, 268, 291, 306, 1211,
+ 531, 481, 217, 446, 277, 240, 1229, 1231, 202, 236,
220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 679, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 666, 0, 0, 0, 257,
- 671, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 678, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 674, 675, 0,
- 0, 0, 0, 0, 0, 2077, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 89, 0, 0, 816, 800,
- 766, 767, 804, 817, 818, 819, 820, 805, 0, 228,
- 806, 807, 235, 808, 0, 765, 706, 708, 707, 725,
- 726, 727, 728, 729, 730, 731, 704, 813, 821, 822,
- 2078, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 0, 0, 0, 0, 0,
- 0, 648, 663, 0, 677, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 660, 661, 0, 0, 0, 0,
- 760, 0, 662, 0, 0, 670, 823, 824, 825, 826,
- 827, 828, 829, 830, 831, 832, 833, 834, 835, 836,
- 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 854, 855, 856,
- 857, 858, 859, 860, 861, 862, 863, 864, 673, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 759, 0, 0, 541, 0, 0,
- 757, 0, 0, 0, 0, 349, 0, 316, 191, 215,
- 0, 0, 395, 441, 453, 0, 0, 0, 810, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 811, 812, 243, 561, 712, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 720,
- 721, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 770, 758, 683, 774, 685, 771, 772, 680, 681,
- 684, 773, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 761, 669, 668, 0, 676, 0, 702, 703, 705,
- 709, 710, 711, 722, 723, 724, 732, 734, 735, 733,
- 736, 737, 738, 741, 742, 743, 744, 739, 740, 745,
- 686, 690, 687, 688, 689, 701, 691, 692, 693, 694,
- 695, 696, 697, 698, 699, 700, 784, 785, 786, 787,
- 788, 789, 715, 719, 718, 716, 717, 713, 714, 667,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 775, 249, 776, 0, 0, 780, 0, 0, 0,
- 782, 781, 0, 783, 749, 748, 0, 0, 777, 778,
- 0, 779, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 790, 791, 792, 793,
- 794, 795, 796, 797, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 815, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 0,
- 380, 360, 497, 498, 302, 80, 496, 0, 679, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 0, 0, 0, 666, 0, 0, 0, 257, 671, 0,
- 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
- 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
- 369, 411, 486, 405, 678, 354, 0, 0, 476, 384,
- 0, 0, 0, 0, 0, 674, 675, 0, 0, 0,
- 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 89, 0, 0, 816, 800, 766, 767,
- 804, 817, 818, 819, 820, 805, 0, 228, 806, 807,
- 235, 808, 0, 765, 706, 708, 707, 725, 726, 727,
- 728, 729, 730, 731, 704, 813, 821, 822, 0, 252,
- 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
- 0, 219, 0, 0, 0, 0, 0, 0, 0, 648,
- 663, 0, 677, 0, 0, 0, 262, 0, 0, 0,
+ 253, 416, 1192, 1220, 360, 497, 498, 302, 380, 0,
+ 0, 0, 1249, 1234, 496, 0, 1177, 1252, 1146, 1165,
+ 1262, 1168, 1171, 1213, 1125, 1191, 399, 1162, 1118, 1150,
+ 1120, 1157, 1121, 1148, 1179, 257, 1145, 1236, 1195, 1251,
+ 350, 254, 1127, 1151, 413, 1167, 196, 1215, 466, 241,
+ 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
+ 486, 405, 1258, 354, 1201, 0, 476, 384, 0, 0,
+ 0, 1181, 1240, 1189, 1227, 1176, 1214, 1135, 1200, 1253,
+ 1163, 1210, 1254, 309, 237, 311, 195, 396, 477, 273,
+ 0, 0, 0, 0, 0, 627, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
+ 0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
+ 341, 347, 1159, 1207, 1248, 1160, 1209, 252, 307, 259,
+ 251, 501, 1259, 1239, 1124, 1188, 1247, 0, 0, 219,
+ 1250, 1183, 0, 1212, 0, 1265, 1119, 1203, 0, 1122,
+ 1126, 1261, 1243, 1154, 262, 0, 0, 0, 0, 0,
+ 0, 0, 1180, 1190, 1224, 1228, 1174, 0, 0, 0,
+ 0, 0, 0, 0, 1152, 0, 1199, 0, 0, 0,
+ 1131, 1123, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1178, 0, 0, 0, 0, 1134,
+ 0, 1153, 1225, 0, 1117, 284, 1128, 385, 244, 0,
+ 1232, 1242, 1175, 541, 1246, 1173, 1172, 1219, 1132, 1238,
+ 1166, 349, 1130, 316, 191, 215, 0, 1164, 395, 441,
+ 453, 1237, 1149, 1158, 242, 1156, 451, 409, 520, 223,
+ 271, 438, 415, 449, 422, 274, 1198, 1217, 450, 356,
+ 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
+ 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
+ 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
+ 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
+ 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
+ 0, 437, 255, 280, 245, 398, 510, 511, 243, 561,
+ 218, 536, 210, 1129, 535, 391, 505, 514, 378, 367,
+ 209, 512, 376, 366, 320, 339, 340, 267, 293, 429,
+ 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
+ 199, 0, 478, 525, 562, 224, 225, 227, 1144, 266,
+ 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
+ 433, 1233, 500, 518, 530, 540, 546, 547, 549, 550,
+ 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
+ 1222, 1264, 408, 452, 230, 522, 475, 1139, 1143, 1137,
+ 1204, 1138, 1193, 1194, 1140, 1255, 1256, 1257, 563, 564,
+ 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 580, 0, 1226, 1133, 0,
+ 1141, 1142, 1235, 1244, 1245, 581, 368, 465, 519, 321,
+ 333, 336, 326, 345, 0, 346, 322, 323, 328, 330,
+ 331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
+ 499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
+ 534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
+ 447, 317, 318, 363, 431, 1197, 190, 211, 352, 1260,
+ 434, 275, 559, 532, 527, 197, 213, 1136, 249, 1147,
+ 1155, 0, 1161, 1169, 1170, 1182, 1184, 1185, 1186, 1187,
+ 1205, 1206, 1208, 1216, 1218, 1221, 1223, 1230, 1241, 1263,
+ 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
+ 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
+ 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
+ 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
+ 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
+ 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
+ 544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
+ 286, 516, 545, 0, 0, 362, 1196, 1202, 365, 268,
+ 291, 306, 1211, 531, 481, 217, 446, 277, 240, 1229,
+ 1231, 202, 236, 220, 246, 261, 264, 310, 375, 383,
+ 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
+ 489, 491, 379, 253, 416, 1192, 1220, 360, 497, 498,
+ 302, 380, 0, 0, 0, 1249, 1234, 496, 0, 1177,
+ 1252, 1146, 1165, 1262, 1168, 1171, 1213, 1125, 1191, 399,
+ 1162, 1118, 1150, 1120, 1157, 1121, 1148, 1179, 257, 1145,
+ 1236, 1195, 1251, 350, 254, 1127, 1151, 413, 1167, 196,
+ 1215, 466, 241, 361, 358, 504, 269, 260, 256, 239,
+ 303, 369, 411, 486, 405, 1258, 354, 1201, 0, 476,
+ 384, 0, 0, 0, 1181, 1240, 1189, 1227, 1176, 1214,
+ 1135, 1200, 1253, 1163, 1210, 1254, 309, 237, 311, 195,
+ 396, 477, 273, 0, 0, 0, 0, 0, 800, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
+ 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
+ 327, 329, 334, 341, 347, 1159, 1207, 1248, 1160, 1209,
+ 252, 307, 259, 251, 501, 1259, 1239, 1124, 1188, 1247,
+ 0, 0, 219, 1250, 1183, 0, 1212, 0, 1265, 1119,
+ 1203, 0, 1122, 1126, 1261, 1243, 1154, 262, 0, 0,
+ 0, 0, 0, 0, 0, 1180, 1190, 1224, 1228, 1174,
+ 0, 0, 0, 0, 0, 0, 0, 1152, 0, 1199,
+ 0, 0, 0, 1131, 1123, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 660, 661, 0, 0, 0, 0, 760, 0,
- 662, 0, 0, 670, 823, 824, 825, 826, 827, 828,
- 829, 830, 831, 832, 833, 834, 835, 836, 837, 838,
- 839, 840, 841, 842, 843, 844, 845, 846, 847, 848,
- 849, 850, 851, 852, 853, 854, 855, 856, 857, 858,
- 859, 860, 861, 862, 863, 864, 673, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
- 244, 0, 759, 0, 0, 541, 0, 0, 757, 0,
- 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
- 395, 441, 453, 0, 0, 0, 810, 0, 451, 409,
- 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
- 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
- 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
- 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
- 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
- 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
- 313, 204, 0, 437, 255, 280, 245, 398, 811, 812,
- 243, 561, 712, 536, 210, 0, 535, 391, 505, 514,
- 378, 367, 209, 512, 376, 366, 320, 720, 721, 267,
- 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
- 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
- 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
- 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
- 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
- 319, 357, 0, 0, 408, 452, 230, 522, 475, 770,
- 758, 683, 774, 685, 771, 772, 680, 681, 684, 773,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 761,
- 669, 668, 0, 676, 0, 702, 703, 705, 709, 710,
- 711, 722, 723, 724, 732, 734, 735, 733, 736, 737,
- 738, 741, 742, 743, 744, 739, 740, 745, 686, 690,
- 687, 688, 689, 701, 691, 692, 693, 694, 695, 696,
- 697, 698, 699, 700, 784, 785, 786, 787, 788, 789,
- 715, 719, 718, 716, 717, 713, 714, 667, 190, 211,
- 352, 88, 434, 275, 559, 532, 527, 197, 213, 775,
- 249, 776, 0, 0, 780, 0, 0, 0, 782, 781,
- 0, 783, 749, 748, 0, 0, 777, 778, 0, 779,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 790, 791, 792, 793, 794, 795,
- 796, 797, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 815, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 679, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 666, 0, 0, 0, 257, 671, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 678, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 674, 675, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 89, 0, 0, 816, 800, 766, 767, 804, 817, 818,
- 819, 820, 805, 0, 228, 806, 807, 235, 808, 0,
- 765, 706, 708, 707, 725, 726, 727, 728, 729, 730,
- 731, 704, 813, 821, 822, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 648, 663, 0, 677,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 660,
- 661, 0, 0, 0, 0, 760, 0, 662, 0, 0,
- 670, 823, 824, 825, 826, 827, 828, 829, 830, 831,
- 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
- 842, 843, 844, 845, 846, 847, 848, 849, 850, 851,
- 852, 853, 854, 855, 856, 857, 858, 859, 860, 861,
- 862, 863, 864, 673, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 759,
- 0, 0, 541, 0, 0, 757, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 810, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 3384, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 811, 812, 243, 561, 712,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 720, 721, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 770, 758, 683, 774,
- 685, 771, 772, 680, 681, 684, 773, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 761, 669, 668, 0,
- 676, 0, 702, 703, 705, 709, 710, 711, 722, 723,
- 724, 732, 734, 735, 733, 736, 737, 738, 741, 742,
- 743, 744, 739, 740, 745, 686, 690, 687, 688, 689,
- 701, 691, 692, 693, 694, 695, 696, 697, 698, 699,
- 700, 784, 785, 786, 787, 788, 789, 715, 719, 718,
- 716, 717, 713, 714, 667, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 775, 249, 776, 0,
- 0, 780, 0, 0, 0, 782, 781, 0, 783, 749,
- 748, 0, 0, 777, 778, 0, 779, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 790, 791, 792, 793, 794, 795, 796, 797, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 815, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1178, 0, 0,
+ 0, 0, 1134, 0, 1153, 1225, 0, 1117, 284, 1128,
+ 385, 244, 0, 1232, 1242, 1175, 541, 1246, 1173, 1172,
+ 1219, 1132, 1238, 1166, 349, 1130, 316, 191, 215, 0,
+ 1164, 395, 441, 453, 1237, 1149, 1158, 242, 1156, 451,
+ 409, 520, 223, 271, 438, 415, 449, 422, 274, 1198,
+ 1217, 450, 356, 506, 432, 517, 542, 543, 250, 389,
+ 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
+ 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
+ 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
+ 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
+ 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
+ 511, 243, 561, 218, 536, 210, 1129, 535, 391, 505,
+ 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
+ 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
+ 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
+ 227, 1144, 266, 270, 278, 281, 289, 290, 299, 351,
+ 402, 428, 424, 433, 1233, 500, 518, 530, 540, 546,
+ 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
+ 474, 319, 357, 1222, 1264, 408, 452, 230, 522, 475,
+ 1139, 1143, 1137, 1204, 1138, 1193, 1194, 1140, 1255, 1256,
+ 1257, 563, 564, 565, 566, 567, 568, 569, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
+ 1226, 1133, 0, 1141, 1142, 1235, 1244, 1245, 581, 368,
+ 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
+ 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
+ 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
+ 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
+ 528, 295, 448, 447, 317, 318, 363, 431, 1197, 190,
+ 211, 352, 1260, 434, 275, 559, 532, 527, 197, 213,
+ 1136, 249, 1147, 1155, 0, 1161, 1169, 1170, 1182, 1184,
+ 1185, 1186, 1187, 1205, 1206, 1208, 1216, 1218, 1221, 1223,
+ 1230, 1241, 1263, 192, 193, 200, 212, 222, 226, 233,
+ 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
+ 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
+ 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
+ 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
+ 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
+ 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
+ 301, 556, 557, 286, 516, 545, 0, 0, 362, 1196,
+ 1202, 365, 268, 291, 306, 1211, 531, 481, 217, 446,
+ 277, 240, 1229, 1231, 202, 236, 220, 246, 261, 264,
+ 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
+ 464, 487, 488, 489, 491, 379, 253, 416, 1192, 1220,
+ 360, 497, 498, 302, 380, 0, 0, 0, 0, 0,
496, 0, 679, 0, 0, 0, 0, 0, 0, 0,
0, 0, 399, 0, 0, 0, 0, 666, 0, 0,
0, 257, 671, 0, 0, 0, 350, 254, 0, 0,
@@ -3331,12 +2993,12 @@ var yyAct = [...]int{
260, 256, 239, 303, 369, 411, 486, 405, 678, 354,
0, 0, 476, 384, 0, 0, 0, 0, 0, 674,
675, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 89, 0, 1497,
+ 237, 311, 195, 396, 477, 273, 0, 89, 0, 0,
816, 800, 766, 767, 804, 817, 818, 819, 820, 805,
0, 228, 806, 807, 235, 808, 0, 765, 706, 708,
707, 725, 726, 727, 728, 729, 730, 731, 704, 813,
821, 822, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
+ 1927, 1928, 1929, 0, 0, 219, 0, 0, 0, 0,
0, 0, 0, 648, 663, 0, 677, 0, 0, 0,
262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 660, 661, 0, 0,
@@ -3395,16 +3057,16 @@ var yyAct = [...]int{
0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
303, 369, 411, 486, 405, 678, 354, 0, 0, 476,
384, 0, 0, 0, 0, 0, 674, 675, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
+ 0, 0, 0, 0, 2077, 0, 309, 237, 311, 195,
396, 477, 273, 0, 89, 0, 0, 816, 800, 766,
767, 804, 817, 818, 819, 820, 805, 0, 228, 806,
807, 235, 808, 0, 765, 706, 708, 707, 725, 726,
- 727, 728, 729, 730, 731, 704, 813, 821, 822, 0,
+ 727, 728, 729, 730, 731, 704, 813, 821, 822, 2078,
252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
648, 663, 0, 677, 0, 0, 0, 262, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 660, 661, 905, 0, 0, 0, 760,
+ 0, 0, 0, 660, 661, 0, 0, 0, 0, 760,
0, 662, 0, 0, 670, 823, 824, 825, 826, 827,
828, 829, 830, 831, 832, 833, 834, 835, 836, 837,
838, 839, 840, 841, 842, 843, 844, 845, 846, 847,
@@ -3452,404 +3114,857 @@ var yyAct = [...]int{
0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
277, 240, 815, 0, 202, 236, 220, 246, 261, 264,
310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
- 360, 497, 498, 302, 496, 0, 679, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
- 0, 666, 0, 0, 0, 257, 671, 0, 0, 0,
- 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
- 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
- 486, 405, 678, 354, 0, 0, 476, 384, 0, 0,
- 0, 0, 0, 674, 675, 0, 0, 0, 0, 0,
- 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 89, 0, 0, 816, 800, 766, 767, 804, 817,
- 818, 819, 820, 805, 0, 228, 806, 807, 235, 808,
- 0, 765, 706, 708, 707, 725, 726, 727, 728, 729,
- 730, 731, 704, 813, 821, 822, 0, 252, 307, 259,
- 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
- 0, 0, 0, 0, 0, 0, 0, 648, 663, 0,
- 677, 0, 0, 0, 262, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 660, 661, 0, 0, 0, 0, 760, 0, 662, 0,
- 0, 670, 823, 824, 825, 826, 827, 828, 829, 830,
- 831, 832, 833, 834, 835, 836, 837, 838, 839, 840,
- 841, 842, 843, 844, 845, 846, 847, 848, 849, 850,
- 851, 852, 853, 854, 855, 856, 857, 858, 859, 860,
- 861, 862, 863, 864, 673, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
- 759, 0, 0, 541, 0, 0, 757, 0, 0, 0,
- 0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
- 453, 0, 0, 0, 810, 0, 451, 409, 520, 223,
- 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
- 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
- 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
- 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
- 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
- 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
- 0, 437, 255, 280, 245, 398, 811, 812, 243, 561,
- 712, 536, 210, 0, 535, 391, 505, 514, 378, 367,
- 209, 512, 376, 366, 320, 720, 721, 267, 293, 429,
- 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
- 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
- 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
- 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
- 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
- 0, 0, 408, 452, 230, 522, 475, 770, 758, 683,
- 774, 685, 771, 772, 680, 681, 684, 773, 563, 564,
- 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
- 575, 576, 577, 578, 579, 580, 0, 761, 669, 668,
- 0, 676, 0, 702, 703, 705, 709, 710, 711, 722,
- 723, 724, 732, 734, 735, 733, 736, 737, 738, 741,
- 742, 743, 744, 739, 740, 745, 686, 690, 687, 688,
- 689, 701, 691, 692, 693, 694, 695, 696, 697, 698,
- 699, 700, 784, 785, 786, 787, 788, 789, 715, 719,
- 718, 716, 717, 713, 714, 667, 190, 211, 352, 0,
- 434, 275, 559, 532, 527, 197, 213, 775, 249, 776,
- 0, 0, 780, 0, 0, 0, 782, 781, 0, 783,
- 749, 748, 0, 0, 777, 778, 0, 779, 0, 0,
- 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
- 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
- 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
- 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
- 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
- 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
- 544, 460, 790, 791, 792, 793, 794, 795, 796, 797,
- 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
- 291, 306, 0, 531, 481, 217, 446, 277, 240, 815,
- 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
- 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
- 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
- 302, 496, 0, 679, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 666, 0,
- 0, 0, 257, 671, 0, 0, 0, 350, 254, 0,
- 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
- 269, 260, 256, 239, 303, 369, 411, 486, 405, 678,
- 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
- 674, 675, 0, 0, 0, 0, 0, 0, 0, 0,
- 309, 237, 311, 195, 396, 477, 273, 0, 89, 0,
- 0, 816, 800, 766, 767, 804, 817, 818, 819, 820,
- 805, 0, 228, 806, 807, 235, 808, 0, 765, 706,
- 708, 707, 725, 726, 727, 728, 729, 730, 731, 704,
- 813, 821, 822, 0, 252, 307, 259, 251, 501, 0,
- 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
- 0, 0, 0, 0, 0, 663, 0, 677, 0, 0,
- 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 660, 661, 0,
- 0, 0, 0, 760, 0, 662, 0, 0, 670, 823,
- 824, 825, 826, 827, 828, 829, 830, 831, 832, 833,
- 834, 835, 836, 837, 838, 839, 840, 841, 842, 843,
- 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
- 854, 855, 856, 857, 858, 859, 860, 861, 862, 863,
- 864, 673, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 284, 0, 385, 244, 0, 759, 0, 0,
- 541, 0, 0, 757, 0, 0, 0, 0, 349, 0,
- 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
- 0, 810, 0, 451, 409, 520, 223, 271, 438, 415,
- 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
- 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
- 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
- 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
- 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
- 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
- 280, 245, 398, 811, 812, 243, 561, 712, 536, 210,
- 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
- 366, 320, 720, 721, 267, 293, 429, 359, 430, 292,
- 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
- 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
- 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
- 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
- 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
- 452, 230, 522, 475, 770, 758, 683, 774, 685, 771,
- 772, 680, 681, 684, 773, 563, 564, 565, 566, 567,
- 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 580, 0, 761, 669, 668, 0, 676, 0,
- 702, 703, 705, 709, 710, 711, 722, 723, 724, 732,
- 734, 735, 733, 736, 737, 738, 741, 742, 743, 744,
- 739, 740, 745, 686, 690, 687, 688, 689, 701, 691,
- 692, 693, 694, 695, 696, 697, 698, 699, 700, 784,
- 785, 786, 787, 788, 789, 715, 719, 718, 716, 717,
- 713, 714, 667, 190, 211, 352, 0, 434, 275, 559,
- 532, 527, 197, 213, 775, 249, 776, 0, 0, 780,
- 0, 0, 0, 782, 781, 0, 783, 749, 748, 0,
- 0, 777, 778, 0, 779, 0, 0, 192, 193, 200,
- 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
- 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
- 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
- 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
- 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
- 479, 480, 484, 507, 509, 521, 539, 544, 460, 790,
- 791, 792, 793, 794, 795, 796, 797, 286, 516, 545,
- 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
- 531, 481, 217, 446, 277, 240, 815, 0, 202, 236,
- 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
- 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
- 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 0, 0, 0, 0, 257,
- 0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
- 196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
- 239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
- 476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 627,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
- 0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
- 325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
- 0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
- 0, 0, 0, 219, 0, 976, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 262, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 975, 541, 0, 0,
- 0, 0, 0, 972, 973, 349, 933, 316, 191, 215,
- 966, 970, 395, 441, 453, 0, 0, 0, 242, 0,
- 451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
- 389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
- 473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
- 214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
- 485, 515, 229, 463, 0, 0, 560, 205, 513, 482,
- 377, 312, 313, 204, 0, 437, 255, 280, 245, 398,
- 510, 511, 243, 561, 218, 536, 210, 0, 535, 391,
- 505, 514, 378, 367, 209, 512, 376, 366, 320, 339,
- 340, 267, 293, 429, 359, 430, 292, 294, 387, 386,
- 388, 198, 524, 0, 199, 0, 478, 525, 562, 224,
- 225, 227, 0, 266, 270, 278, 281, 289, 290, 299,
- 351, 402, 428, 424, 433, 0, 500, 518, 530, 540,
- 546, 547, 549, 550, 551, 552, 553, 555, 554, 390,
- 297, 474, 319, 357, 0, 0, 408, 452, 230, 522,
- 475, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 563, 564, 565, 566, 567, 568, 569, 570,
- 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 581,
- 368, 465, 519, 321, 333, 336, 326, 345, 0, 346,
- 322, 323, 328, 330, 331, 332, 337, 338, 342, 348,
- 238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
- 493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
- 279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
- 213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
- 233, 248, 263, 265, 272, 285, 296, 304, 305, 308,
- 314, 364, 370, 371, 372, 373, 392, 393, 394, 397,
- 400, 401, 404, 406, 407, 410, 414, 418, 419, 420,
- 421, 423, 425, 435, 440, 454, 455, 456, 457, 458,
- 461, 462, 467, 468, 469, 470, 471, 479, 480, 484,
- 507, 509, 521, 539, 544, 460, 287, 288, 426, 427,
- 300, 301, 556, 557, 286, 516, 545, 0, 0, 362,
- 0, 0, 365, 268, 291, 306, 0, 531, 481, 217,
- 446, 277, 240, 0, 0, 202, 236, 220, 246, 261,
- 264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
- 231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
- 0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
+ 464, 487, 488, 489, 491, 379, 253, 416, 0, 380,
+ 360, 497, 498, 302, 80, 496, 0, 679, 0, 0,
0, 0, 0, 0, 0, 0, 0, 399, 0, 0,
- 0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
+ 0, 0, 666, 0, 0, 0, 257, 671, 0, 0,
0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
- 411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 411, 486, 405, 678, 354, 0, 0, 476, 384, 0,
+ 0, 0, 0, 0, 674, 675, 0, 0, 0, 0,
0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 1460, 800, 0, 0, 1457,
- 0, 0, 0, 0, 1455, 0, 228, 1456, 1454, 235,
- 1459, 0, 765, 335, 344, 343, 324, 325, 327, 329,
- 334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
+ 273, 0, 89, 0, 0, 816, 800, 766, 767, 804,
+ 817, 818, 819, 820, 805, 0, 228, 806, 807, 235,
+ 808, 0, 765, 706, 708, 707, 725, 726, 727, 728,
+ 729, 730, 731, 704, 813, 821, 822, 0, 252, 307,
259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
- 219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 219, 0, 0, 0, 0, 0, 0, 0, 648, 663,
+ 0, 677, 0, 0, 0, 262, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 660, 661, 0, 0, 0, 0, 760, 0, 662,
+ 0, 0, 670, 823, 824, 825, 826, 827, 828, 829,
+ 830, 831, 832, 833, 834, 835, 836, 837, 838, 839,
+ 840, 841, 842, 843, 844, 845, 846, 847, 848, 849,
+ 850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
+ 860, 861, 862, 863, 864, 673, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
+ 0, 759, 0, 0, 541, 0, 0, 757, 0, 0,
0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
- 441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
+ 441, 453, 0, 0, 0, 810, 0, 451, 409, 520,
223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
537, 558, 216, 247, 403, 483, 523, 473, 381, 502,
503, 315, 472, 282, 194, 353, 548, 214, 459, 355,
232, 221, 508, 526, 276, 436, 203, 485, 515, 229,
463, 0, 0, 560, 205, 513, 482, 377, 312, 313,
- 204, 0, 437, 255, 280, 245, 398, 510, 511, 243,
- 561, 218, 536, 210, 0, 535, 391, 505, 514, 378,
- 367, 209, 512, 376, 366, 320, 339, 340, 267, 293,
+ 204, 0, 437, 255, 280, 245, 398, 811, 812, 243,
+ 561, 712, 536, 210, 0, 535, 391, 505, 514, 378,
+ 367, 209, 512, 376, 366, 320, 720, 721, 267, 293,
429, 359, 430, 292, 294, 387, 386, 388, 198, 524,
0, 199, 0, 478, 525, 562, 224, 225, 227, 0,
266, 270, 278, 281, 289, 290, 299, 351, 402, 428,
424, 433, 0, 500, 518, 530, 540, 546, 547, 549,
550, 551, 552, 553, 555, 554, 390, 297, 474, 319,
- 357, 0, 0, 408, 452, 230, 522, 475, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 563,
+ 357, 0, 0, 408, 452, 230, 522, 475, 770, 758,
+ 683, 774, 685, 771, 772, 680, 681, 684, 773, 563,
564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
- 574, 575, 576, 577, 578, 579, 580, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 581, 368, 465, 519,
- 321, 333, 336, 326, 345, 0, 346, 322, 323, 328,
- 330, 331, 332, 337, 338, 342, 348, 238, 201, 374,
- 382, 499, 298, 206, 207, 208, 492, 493, 494, 495,
- 533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
- 448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
- 0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 574, 575, 576, 577, 578, 579, 580, 0, 761, 669,
+ 668, 0, 676, 0, 702, 703, 705, 709, 710, 711,
+ 722, 723, 724, 732, 734, 735, 733, 736, 737, 738,
+ 741, 742, 743, 744, 739, 740, 745, 686, 690, 687,
+ 688, 689, 701, 691, 692, 693, 694, 695, 696, 697,
+ 698, 699, 700, 784, 785, 786, 787, 788, 789, 715,
+ 719, 718, 716, 717, 713, 714, 667, 190, 211, 352,
+ 88, 434, 275, 559, 532, 527, 197, 213, 775, 249,
+ 776, 0, 0, 780, 0, 0, 0, 782, 781, 0,
+ 783, 749, 748, 0, 0, 777, 778, 0, 779, 0,
0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
- 539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 286, 516, 545, 0, 0, 362, 0, 0, 365,
+ 539, 544, 460, 790, 791, 792, 793, 794, 795, 796,
+ 797, 286, 516, 545, 0, 0, 362, 0, 0, 365,
268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
- 0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
+ 815, 0, 202, 236, 220, 246, 261, 264, 310, 375,
383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
- 488, 489, 491, 379, 253, 416, 0, 380, 360, 497,
- 498, 302, 80, 496, 0, 0, 0, 0, 0, 0,
+ 488, 489, 491, 379, 253, 416, 380, 0, 360, 497,
+ 498, 302, 496, 0, 679, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 399, 0, 0, 0, 0, 666,
+ 0, 0, 0, 257, 671, 0, 0, 0, 350, 254,
+ 0, 0, 413, 0, 196, 0, 466, 241, 361, 358,
+ 504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
+ 678, 354, 0, 0, 476, 384, 0, 0, 0, 0,
+ 0, 674, 675, 0, 0, 0, 0, 0, 0, 0,
+ 0, 309, 237, 311, 195, 396, 477, 273, 0, 89,
+ 0, 0, 816, 800, 766, 767, 804, 817, 818, 819,
+ 820, 805, 0, 228, 806, 807, 235, 808, 0, 765,
+ 706, 708, 707, 725, 726, 727, 728, 729, 730, 731,
+ 704, 813, 821, 822, 0, 252, 307, 259, 251, 501,
+ 0, 0, 0, 0, 0, 0, 0, 219, 0, 0,
+ 0, 0, 0, 0, 0, 648, 663, 0, 677, 0,
+ 0, 0, 262, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 660, 661,
+ 0, 0, 0, 0, 760, 0, 662, 0, 0, 670,
+ 823, 824, 825, 826, 827, 828, 829, 830, 831, 832,
+ 833, 834, 835, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 860, 861, 862,
+ 863, 864, 673, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 284, 0, 385, 244, 0, 759, 0,
+ 0, 541, 0, 0, 757, 0, 0, 0, 0, 349,
+ 0, 316, 191, 215, 0, 0, 395, 441, 453, 0,
+ 0, 0, 810, 0, 451, 409, 520, 223, 271, 438,
+ 415, 449, 422, 274, 3384, 0, 450, 356, 506, 432,
+ 517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
+ 247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
+ 282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
+ 526, 276, 436, 203, 485, 515, 229, 463, 0, 0,
+ 560, 205, 513, 482, 377, 312, 313, 204, 0, 437,
+ 255, 280, 245, 398, 811, 812, 243, 561, 712, 536,
+ 210, 0, 535, 391, 505, 514, 378, 367, 209, 512,
+ 376, 366, 320, 720, 721, 267, 293, 429, 359, 430,
+ 292, 294, 387, 386, 388, 198, 524, 0, 199, 0,
+ 478, 525, 562, 224, 225, 227, 0, 266, 270, 278,
+ 281, 289, 290, 299, 351, 402, 428, 424, 433, 0,
+ 500, 518, 530, 540, 546, 547, 549, 550, 551, 552,
+ 553, 555, 554, 390, 297, 474, 319, 357, 0, 0,
+ 408, 452, 230, 522, 475, 770, 758, 683, 774, 685,
+ 771, 772, 680, 681, 684, 773, 563, 564, 565, 566,
+ 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
+ 577, 578, 579, 580, 0, 761, 669, 668, 0, 676,
+ 0, 702, 703, 705, 709, 710, 711, 722, 723, 724,
+ 732, 734, 735, 733, 736, 737, 738, 741, 742, 743,
+ 744, 739, 740, 745, 686, 690, 687, 688, 689, 701,
+ 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 784, 785, 786, 787, 788, 789, 715, 719, 718, 716,
+ 717, 713, 714, 667, 190, 211, 352, 0, 434, 275,
+ 559, 532, 527, 197, 213, 775, 249, 776, 0, 0,
+ 780, 0, 0, 0, 782, 781, 0, 783, 749, 748,
+ 0, 0, 777, 778, 0, 779, 0, 0, 192, 193,
+ 200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
+ 296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
+ 392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
+ 414, 418, 419, 420, 421, 423, 425, 435, 440, 454,
+ 455, 456, 457, 458, 461, 462, 467, 468, 469, 470,
+ 471, 479, 480, 484, 507, 509, 521, 539, 544, 460,
+ 790, 791, 792, 793, 794, 795, 796, 797, 286, 516,
+ 545, 0, 0, 362, 0, 0, 365, 268, 291, 306,
+ 0, 531, 481, 217, 446, 277, 240, 815, 0, 202,
+ 236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
+ 283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
+ 379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
+ 0, 679, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 399, 0, 0, 0, 0, 666, 0, 0, 0,
+ 257, 671, 0, 0, 0, 350, 254, 0, 0, 413,
+ 0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
+ 256, 239, 303, 369, 411, 486, 405, 678, 354, 0,
+ 0, 476, 384, 0, 0, 0, 0, 0, 674, 675,
+ 0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
+ 311, 195, 396, 477, 273, 0, 89, 0, 1497, 816,
+ 800, 766, 767, 804, 817, 818, 819, 820, 805, 0,
+ 228, 806, 807, 235, 808, 0, 765, 706, 708, 707,
+ 725, 726, 727, 728, 729, 730, 731, 704, 813, 821,
+ 822, 0, 252, 307, 259, 251, 501, 0, 0, 0,
+ 0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
+ 0, 0, 648, 663, 0, 677, 0, 0, 0, 262,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 660, 661, 0, 0, 0,
+ 0, 760, 0, 662, 0, 0, 670, 823, 824, 825,
+ 826, 827, 828, 829, 830, 831, 832, 833, 834, 835,
+ 836, 837, 838, 839, 840, 841, 842, 843, 844, 845,
+ 846, 847, 848, 849, 850, 851, 852, 853, 854, 855,
+ 856, 857, 858, 859, 860, 861, 862, 863, 864, 673,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 284, 0, 385, 244, 0, 759, 0, 0, 541, 0,
+ 0, 757, 0, 0, 0, 0, 349, 0, 316, 191,
+ 215, 0, 0, 395, 441, 453, 0, 0, 0, 810,
+ 0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
+ 274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
+ 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
+ 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
+ 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
+ 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
+ 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
+ 398, 811, 812, 243, 561, 712, 536, 210, 0, 535,
+ 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
+ 720, 721, 267, 293, 429, 359, 430, 292, 294, 387,
+ 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
+ 224, 225, 227, 0, 266, 270, 278, 281, 289, 290,
+ 299, 351, 402, 428, 424, 433, 0, 500, 518, 530,
+ 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
+ 390, 297, 474, 319, 357, 0, 0, 408, 452, 230,
+ 522, 475, 770, 758, 683, 774, 685, 771, 772, 680,
+ 681, 684, 773, 563, 564, 565, 566, 567, 568, 569,
+ 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
+ 580, 0, 761, 669, 668, 0, 676, 0, 702, 703,
+ 705, 709, 710, 711, 722, 723, 724, 732, 734, 735,
+ 733, 736, 737, 738, 741, 742, 743, 744, 739, 740,
+ 745, 686, 690, 687, 688, 689, 701, 691, 692, 693,
+ 694, 695, 696, 697, 698, 699, 700, 784, 785, 786,
+ 787, 788, 789, 715, 719, 718, 716, 717, 713, 714,
+ 667, 190, 211, 352, 0, 434, 275, 559, 532, 527,
+ 197, 213, 775, 249, 776, 0, 0, 780, 0, 0,
+ 0, 782, 781, 0, 783, 749, 748, 0, 0, 777,
+ 778, 0, 779, 0, 0, 192, 193, 200, 212, 222,
+ 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
+ 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
+ 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
+ 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
+ 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
+ 484, 507, 509, 521, 539, 544, 460, 790, 791, 792,
+ 793, 794, 795, 796, 797, 286, 516, 545, 0, 0,
+ 362, 0, 0, 365, 268, 291, 306, 0, 531, 481,
+ 217, 446, 277, 240, 815, 0, 202, 236, 220, 246,
+ 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
+ 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
+ 380, 0, 360, 497, 498, 302, 496, 0, 679, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
+ 0, 0, 0, 666, 0, 0, 0, 257, 671, 0,
+ 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
+ 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
+ 369, 411, 486, 405, 678, 354, 0, 0, 476, 384,
+ 0, 0, 0, 0, 0, 674, 675, 0, 0, 0,
+ 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
+ 477, 273, 0, 89, 0, 0, 816, 800, 766, 767,
+ 804, 817, 818, 819, 820, 805, 0, 228, 806, 807,
+ 235, 808, 0, 765, 706, 708, 707, 725, 726, 727,
+ 728, 729, 730, 731, 704, 813, 821, 822, 0, 252,
+ 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
+ 0, 219, 0, 0, 0, 0, 0, 0, 0, 648,
+ 663, 0, 677, 0, 0, 0, 262, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 660, 661, 905, 0, 0, 0, 760, 0,
+ 662, 0, 0, 670, 823, 824, 825, 826, 827, 828,
+ 829, 830, 831, 832, 833, 834, 835, 836, 837, 838,
+ 839, 840, 841, 842, 843, 844, 845, 846, 847, 848,
+ 849, 850, 851, 852, 853, 854, 855, 856, 857, 858,
+ 859, 860, 861, 862, 863, 864, 673, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
+ 244, 0, 759, 0, 0, 541, 0, 0, 757, 0,
+ 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
+ 395, 441, 453, 0, 0, 0, 810, 0, 451, 409,
+ 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
+ 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
+ 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
+ 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
+ 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
+ 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
+ 313, 204, 0, 437, 255, 280, 245, 398, 811, 812,
+ 243, 561, 712, 536, 210, 0, 535, 391, 505, 514,
+ 378, 367, 209, 512, 376, 366, 320, 720, 721, 267,
+ 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
+ 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
+ 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
+ 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
+ 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
+ 319, 357, 0, 0, 408, 452, 230, 522, 475, 770,
+ 758, 683, 774, 685, 771, 772, 680, 681, 684, 773,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
+ 573, 574, 575, 576, 577, 578, 579, 580, 0, 761,
+ 669, 668, 0, 676, 0, 702, 703, 705, 709, 710,
+ 711, 722, 723, 724, 732, 734, 735, 733, 736, 737,
+ 738, 741, 742, 743, 744, 739, 740, 745, 686, 690,
+ 687, 688, 689, 701, 691, 692, 693, 694, 695, 696,
+ 697, 698, 699, 700, 784, 785, 786, 787, 788, 789,
+ 715, 719, 718, 716, 717, 713, 714, 667, 190, 211,
+ 352, 0, 434, 275, 559, 532, 527, 197, 213, 775,
+ 249, 776, 0, 0, 780, 0, 0, 0, 782, 781,
+ 0, 783, 749, 748, 0, 0, 777, 778, 0, 779,
+ 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
+ 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
+ 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
+ 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
+ 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
+ 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
+ 521, 539, 544, 460, 790, 791, 792, 793, 794, 795,
+ 796, 797, 286, 516, 545, 0, 0, 362, 0, 0,
+ 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
+ 240, 815, 0, 202, 236, 220, 246, 261, 264, 310,
+ 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
+ 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
+ 497, 498, 302, 496, 0, 679, 0, 0, 0, 0,
0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
+ 666, 0, 0, 0, 257, 671, 0, 0, 0, 350,
254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 405, 678, 354, 0, 0, 476, 384, 0, 0, 0,
+ 0, 0, 674, 675, 0, 0, 0, 0, 0, 0,
0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 89, 0, 0, 0, 188, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
+ 89, 0, 0, 816, 800, 766, 767, 804, 817, 818,
+ 819, 820, 805, 0, 228, 806, 807, 235, 808, 0,
+ 765, 706, 708, 707, 725, 726, 727, 728, 729, 730,
+ 731, 704, 813, 821, 822, 0, 252, 307, 259, 251,
501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 648, 663, 0, 677,
0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 660,
+ 661, 0, 0, 0, 0, 760, 0, 662, 0, 0,
+ 670, 823, 824, 825, 826, 827, 828, 829, 830, 831,
+ 832, 833, 834, 835, 836, 837, 838, 839, 840, 841,
+ 842, 843, 844, 845, 846, 847, 848, 849, 850, 851,
+ 852, 853, 854, 855, 856, 857, 858, 859, 860, 861,
+ 862, 863, 864, 673, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 284, 0, 385, 244, 0, 759,
+ 0, 0, 541, 0, 0, 757, 0, 0, 0, 0,
349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
+ 0, 0, 0, 810, 0, 451, 409, 520, 223, 271,
438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
+ 437, 255, 280, 245, 398, 811, 812, 243, 561, 712,
536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
+ 512, 376, 366, 320, 720, 721, 267, 293, 429, 359,
430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
+ 0, 408, 452, 230, 522, 475, 770, 758, 683, 774,
+ 685, 771, 772, 680, 681, 684, 773, 563, 564, 565,
566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 88, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 2064, 0, 0, 2063, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
+ 576, 577, 578, 579, 580, 0, 761, 669, 668, 0,
+ 676, 0, 702, 703, 705, 709, 710, 711, 722, 723,
+ 724, 732, 734, 735, 733, 736, 737, 738, 741, 742,
+ 743, 744, 739, 740, 745, 686, 690, 687, 688, 689,
+ 701, 691, 692, 693, 694, 695, 696, 697, 698, 699,
+ 700, 784, 785, 786, 787, 788, 789, 715, 719, 718,
+ 716, 717, 713, 714, 667, 190, 211, 352, 0, 434,
+ 275, 559, 532, 527, 197, 213, 775, 249, 776, 0,
+ 0, 780, 0, 0, 0, 782, 781, 0, 783, 749,
+ 748, 0, 0, 777, 778, 0, 779, 0, 0, 192,
193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
+ 460, 790, 791, 792, 793, 794, 795, 796, 797, 286,
516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
+ 306, 0, 531, 481, 217, 446, 277, 240, 815, 0,
202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 1516, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 1518, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
+ 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
+ 496, 0, 679, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 399, 0, 0, 0, 0, 666, 0, 0,
+ 0, 257, 671, 0, 0, 0, 350, 254, 0, 0,
413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 1520, 627, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 1294,
- 0, 1295, 1296, 0, 0, 0, 0, 0, 0, 0,
+ 260, 256, 239, 303, 369, 411, 486, 405, 678, 354,
+ 0, 0, 476, 384, 0, 0, 0, 0, 0, 674,
+ 675, 0, 0, 0, 0, 0, 0, 0, 0, 309,
+ 237, 311, 195, 396, 477, 273, 0, 89, 0, 0,
+ 816, 800, 766, 767, 804, 817, 818, 819, 820, 805,
+ 0, 228, 806, 807, 235, 808, 0, 765, 706, 708,
+ 707, 725, 726, 727, 728, 729, 730, 731, 704, 813,
+ 821, 822, 0, 252, 307, 259, 251, 501, 0, 0,
+ 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
+ 0, 0, 0, 0, 663, 0, 677, 0, 0, 0,
262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
+ 0, 0, 0, 0, 0, 0, 660, 661, 0, 0,
+ 0, 0, 760, 0, 662, 0, 0, 670, 823, 824,
+ 825, 826, 827, 828, 829, 830, 831, 832, 833, 834,
+ 835, 836, 837, 838, 839, 840, 841, 842, 843, 844,
+ 845, 846, 847, 848, 849, 850, 851, 852, 853, 854,
+ 855, 856, 857, 858, 859, 860, 861, 862, 863, 864,
+ 673, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 284, 0, 385, 244, 0, 759, 0, 0, 541,
+ 0, 0, 757, 0, 0, 0, 0, 349, 0, 316,
191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
+ 810, 0, 451, 409, 520, 223, 271, 438, 415, 449,
422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
+ 245, 398, 811, 812, 243, 561, 712, 536, 210, 0,
535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
+ 320, 720, 721, 267, 293, 429, 359, 430, 292, 294,
387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
+ 230, 522, 475, 770, 758, 683, 774, 685, 771, 772,
+ 680, 681, 684, 773, 563, 564, 565, 566, 567, 568,
569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
+ 579, 580, 0, 761, 669, 668, 0, 676, 0, 702,
+ 703, 705, 709, 710, 711, 722, 723, 724, 732, 734,
+ 735, 733, 736, 737, 738, 741, 742, 743, 744, 739,
+ 740, 745, 686, 690, 687, 688, 689, 701, 691, 692,
+ 693, 694, 695, 696, 697, 698, 699, 700, 784, 785,
+ 786, 787, 788, 789, 715, 719, 718, 716, 717, 713,
+ 714, 667, 190, 211, 352, 0, 434, 275, 559, 532,
+ 527, 197, 213, 775, 249, 776, 0, 0, 780, 0,
+ 0, 0, 782, 781, 0, 783, 749, 748, 0, 0,
+ 777, 778, 0, 779, 0, 0, 192, 193, 200, 212,
222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
+ 480, 484, 507, 509, 521, 539, 544, 460, 790, 791,
+ 792, 793, 794, 795, 796, 797, 286, 516, 545, 0,
0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
+ 481, 217, 446, 277, 240, 815, 0, 202, 236, 220,
246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 0, 380, 360, 497, 498, 302, 80, 496, 0,
+ 416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
+ 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
+ 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
+ 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
+ 303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
+ 384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
+ 396, 477, 273, 0, 0, 0, 0, 0, 627, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
+ 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
+ 327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
+ 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
+ 0, 0, 219, 0, 0, 0, 0, 1313, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1321, 1323,
+ 1324, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
+ 385, 244, 0, 0, 0, 0, 541, 0, 0, 0,
+ 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
+ 0, 395, 441, 453, 0, 0, 0, 242, 0, 451,
+ 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
+ 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
+ 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
+ 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
+ 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
+ 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
+ 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
+ 511, 243, 561, 218, 536, 210, 0, 535, 391, 505,
+ 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
+ 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
+ 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
+ 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
+ 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
+ 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
+ 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 563, 564, 565, 566, 567, 568, 569, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 581, 368,
+ 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
+ 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
+ 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
+ 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
+ 528, 295, 448, 447, 317, 318, 363, 431, 0, 190,
+ 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
+ 0, 249, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 192, 193, 200, 212, 222, 226, 233,
+ 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
+ 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
+ 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
+ 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
+ 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
+ 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
+ 301, 556, 557, 286, 516, 545, 0, 0, 362, 0,
+ 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
+ 277, 240, 0, 0, 202, 236, 220, 246, 261, 264,
+ 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
+ 464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
+ 360, 497, 498, 302, 496, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
+ 0, 0, 0, 0, 0, 257, 0, 0, 0, 0,
+ 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
+ 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
+ 486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
+ 0, 0, 0, 0, 0, 627, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
+ 0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
+ 341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
+ 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
+ 0, 976, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 262, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
+ 0, 0, 975, 541, 0, 0, 0, 0, 0, 972,
+ 973, 349, 933, 316, 191, 215, 966, 970, 395, 441,
+ 453, 0, 0, 0, 242, 0, 451, 409, 520, 223,
+ 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
+ 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
+ 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
+ 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
+ 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
+ 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
+ 0, 437, 255, 280, 245, 398, 510, 511, 243, 561,
+ 218, 536, 210, 0, 535, 391, 505, 514, 378, 367,
+ 209, 512, 376, 366, 320, 339, 340, 267, 293, 429,
+ 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
+ 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
+ 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
+ 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
+ 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
+ 0, 0, 408, 452, 230, 522, 475, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 563, 564,
+ 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 580, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 581, 368, 465, 519, 321,
+ 333, 336, 326, 345, 0, 346, 322, 323, 328, 330,
+ 331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
+ 499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
+ 534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
+ 447, 317, 318, 363, 431, 0, 190, 211, 352, 0,
+ 434, 275, 559, 532, 527, 197, 213, 0, 249, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
+ 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
+ 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
+ 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
+ 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
+ 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
+ 544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
+ 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
+ 291, 306, 0, 531, 481, 217, 446, 277, 240, 0,
+ 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
+ 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
+ 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
+ 302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
+ 0, 0, 257, 0, 0, 0, 0, 350, 254, 0,
+ 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
+ 269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
+ 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
+ 0, 1460, 800, 0, 0, 1457, 0, 0, 0, 0,
+ 1455, 0, 228, 1456, 1454, 235, 1459, 0, 765, 335,
+ 344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
+ 0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
+ 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 284, 0, 385, 244, 0, 0, 0, 0,
+ 541, 0, 0, 0, 0, 0, 0, 0, 349, 0,
+ 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
+ 0, 242, 0, 451, 409, 520, 223, 271, 438, 415,
+ 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
+ 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
+ 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
+ 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
+ 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
+ 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
+ 280, 245, 398, 510, 511, 243, 561, 218, 536, 210,
+ 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
+ 366, 320, 339, 340, 267, 293, 429, 359, 430, 292,
+ 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
+ 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
+ 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
+ 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
+ 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
+ 452, 230, 522, 475, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 563, 564, 565, 566, 567,
+ 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
+ 578, 579, 580, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 581, 368, 465, 519, 321, 333, 336, 326,
+ 345, 0, 346, 322, 323, 328, 330, 331, 332, 337,
+ 338, 342, 348, 238, 201, 374, 382, 499, 298, 206,
+ 207, 208, 492, 493, 494, 495, 533, 534, 538, 442,
+ 443, 444, 445, 279, 528, 295, 448, 447, 317, 318,
+ 363, 431, 0, 190, 211, 352, 0, 434, 275, 559,
+ 532, 527, 197, 213, 0, 249, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 192, 193, 200,
+ 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
+ 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
+ 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
+ 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
+ 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
+ 479, 480, 484, 507, 509, 521, 539, 544, 460, 287,
+ 288, 426, 427, 300, 301, 556, 557, 286, 516, 545,
+ 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
+ 531, 481, 217, 446, 277, 240, 0, 0, 202, 236,
+ 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
+ 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
+ 253, 416, 0, 380, 360, 497, 498, 302, 80, 496,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 399, 0, 0, 0, 0, 0, 0, 0, 0,
+ 257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
+ 0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
+ 256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
+ 0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
+ 311, 195, 396, 477, 273, 0, 89, 0, 0, 0,
+ 188, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
+ 324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
+ 0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
+ 0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 262,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
+ 0, 0, 0, 0, 0, 0, 349, 0, 316, 191,
+ 215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
+ 0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
+ 274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
+ 250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
+ 523, 473, 381, 502, 503, 315, 472, 282, 194, 353,
+ 548, 214, 459, 355, 232, 221, 508, 526, 276, 436,
+ 203, 485, 515, 229, 463, 0, 0, 560, 205, 513,
+ 482, 377, 312, 313, 204, 0, 437, 255, 280, 245,
+ 398, 510, 511, 243, 561, 218, 536, 210, 0, 535,
+ 391, 505, 514, 378, 367, 209, 512, 376, 366, 320,
+ 339, 340, 267, 293, 429, 359, 430, 292, 294, 387,
+ 386, 388, 198, 524, 0, 199, 0, 478, 525, 562,
+ 224, 225, 227, 0, 266, 270, 278, 281, 289, 290,
+ 299, 351, 402, 428, 424, 433, 0, 500, 518, 530,
+ 540, 546, 547, 549, 550, 551, 552, 553, 555, 554,
+ 390, 297, 474, 319, 357, 0, 0, 408, 452, 230,
+ 522, 475, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 563, 564, 565, 566, 567, 568, 569,
+ 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
+ 580, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 581, 368, 465, 519, 321, 333, 336, 326, 345, 0,
+ 346, 322, 323, 328, 330, 331, 332, 337, 338, 342,
+ 348, 238, 201, 374, 382, 499, 298, 206, 207, 208,
+ 492, 493, 494, 495, 533, 534, 538, 442, 443, 444,
+ 445, 279, 528, 295, 448, 447, 317, 318, 363, 431,
+ 0, 190, 211, 352, 88, 434, 275, 559, 532, 527,
+ 197, 213, 0, 249, 0, 0, 0, 0, 0, 0,
+ 2064, 0, 0, 2063, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 192, 193, 200, 212, 222,
+ 226, 233, 248, 263, 265, 272, 285, 296, 304, 305,
+ 308, 314, 364, 370, 371, 372, 373, 392, 393, 394,
+ 397, 400, 401, 404, 406, 407, 410, 414, 418, 419,
+ 420, 421, 423, 425, 435, 440, 454, 455, 456, 457,
+ 458, 461, 462, 467, 468, 469, 470, 471, 479, 480,
+ 484, 507, 509, 521, 539, 544, 460, 287, 288, 426,
+ 427, 300, 301, 556, 557, 286, 516, 545, 0, 0,
+ 362, 0, 0, 365, 268, 291, 306, 0, 531, 481,
+ 217, 446, 277, 240, 0, 0, 202, 236, 220, 246,
+ 261, 264, 310, 375, 383, 412, 417, 283, 258, 234,
+ 439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
+ 1516, 0, 360, 497, 498, 302, 496, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
+ 0, 0, 1518, 0, 0, 0, 0, 257, 0, 0,
+ 0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
+ 466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
+ 369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
+ 477, 273, 0, 0, 0, 0, 1520, 627, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
+ 235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
+ 329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
+ 307, 259, 251, 501, 0, 0, 0, 0, 0, 0,
+ 0, 219, 0, 0, 0, 1294, 0, 1295, 1296, 0,
+ 0, 0, 0, 0, 0, 0, 262, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 284, 0, 385,
+ 244, 0, 0, 0, 0, 541, 0, 0, 0, 0,
+ 0, 0, 0, 349, 0, 316, 191, 215, 0, 0,
+ 395, 441, 453, 0, 0, 0, 242, 0, 451, 409,
+ 520, 223, 271, 438, 415, 449, 422, 274, 0, 0,
+ 450, 356, 506, 432, 517, 542, 543, 250, 389, 529,
+ 490, 537, 558, 216, 247, 403, 483, 523, 473, 381,
+ 502, 503, 315, 472, 282, 194, 353, 548, 214, 459,
+ 355, 232, 221, 508, 526, 276, 436, 203, 485, 515,
+ 229, 463, 0, 0, 560, 205, 513, 482, 377, 312,
+ 313, 204, 0, 437, 255, 280, 245, 398, 510, 511,
+ 243, 561, 218, 536, 210, 0, 535, 391, 505, 514,
+ 378, 367, 209, 512, 376, 366, 320, 339, 340, 267,
+ 293, 429, 359, 430, 292, 294, 387, 386, 388, 198,
+ 524, 0, 199, 0, 478, 525, 562, 224, 225, 227,
+ 0, 266, 270, 278, 281, 289, 290, 299, 351, 402,
+ 428, 424, 433, 0, 500, 518, 530, 540, 546, 547,
+ 549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
+ 319, 357, 0, 0, 408, 452, 230, 522, 475, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
+ 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
+ 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
+ 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
+ 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
+ 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
+ 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
+ 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
+ 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
+ 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
+ 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
+ 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
+ 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
+ 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
+ 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
+ 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
+ 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
+ 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
+ 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
+ 487, 488, 489, 491, 379, 253, 416, 0, 380, 360,
+ 497, 498, 302, 80, 496, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
+ 0, 0, 0, 0, 0, 257, 0, 0, 0, 0,
+ 350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
+ 361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
+ 486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
+ 0, 89, 0, 1497, 0, 627, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
+ 0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
+ 341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
+ 251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 262, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
+ 0, 0, 0, 541, 0, 0, 0, 0, 0, 0,
+ 0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
+ 453, 0, 0, 0, 242, 0, 451, 409, 520, 223,
+ 271, 438, 415, 449, 422, 274, 0, 0, 450, 356,
+ 506, 432, 517, 542, 543, 250, 389, 529, 490, 537,
+ 558, 216, 247, 403, 483, 523, 473, 381, 502, 503,
+ 315, 472, 282, 194, 353, 548, 214, 459, 355, 232,
+ 221, 508, 526, 276, 436, 203, 485, 515, 229, 463,
+ 0, 0, 560, 205, 513, 482, 377, 312, 313, 204,
+ 0, 437, 255, 280, 245, 398, 510, 511, 243, 561,
+ 218, 536, 210, 0, 535, 391, 505, 514, 378, 367,
+ 209, 512, 376, 366, 320, 339, 340, 267, 293, 429,
+ 359, 430, 292, 294, 387, 386, 388, 198, 524, 0,
+ 199, 0, 478, 525, 562, 224, 225, 227, 0, 266,
+ 270, 278, 281, 289, 290, 299, 351, 402, 428, 424,
+ 433, 0, 500, 518, 530, 540, 546, 547, 549, 550,
+ 551, 552, 553, 555, 554, 390, 297, 474, 319, 357,
+ 0, 0, 408, 452, 230, 522, 475, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 563, 564,
+ 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 580, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 581, 368, 465, 519, 321,
+ 333, 336, 326, 345, 0, 346, 322, 323, 328, 330,
+ 331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
+ 499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
+ 534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
+ 447, 317, 318, 363, 431, 0, 190, 211, 352, 88,
+ 434, 275, 559, 532, 527, 197, 213, 0, 249, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 192, 193, 200, 212, 222, 226, 233, 248, 263, 265,
+ 272, 285, 296, 304, 305, 308, 314, 364, 370, 371,
+ 372, 373, 392, 393, 394, 397, 400, 401, 404, 406,
+ 407, 410, 414, 418, 419, 420, 421, 423, 425, 435,
+ 440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
+ 469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
+ 544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
+ 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
+ 291, 306, 0, 531, 481, 217, 446, 277, 240, 0,
+ 0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
+ 412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
+ 489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
+ 302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
+ 0, 0, 257, 0, 0, 0, 0, 350, 254, 0,
+ 0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
+ 269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
+ 354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 309, 237, 311, 195, 396, 477, 273, 0, 89, 0,
+ 0, 0, 188, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
+ 344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
+ 0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
+ 0, 0, 0, 0, 0, 0, 219, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 262, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 284, 0, 385, 244, 0, 0, 0, 0,
+ 541, 0, 0, 0, 0, 0, 0, 0, 349, 0,
+ 316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
+ 0, 242, 0, 451, 409, 520, 223, 271, 438, 415,
+ 449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
+ 542, 543, 250, 389, 529, 490, 537, 558, 216, 247,
+ 403, 483, 523, 473, 381, 502, 503, 315, 472, 282,
+ 194, 353, 548, 214, 459, 355, 232, 221, 508, 526,
+ 276, 436, 203, 485, 515, 229, 463, 0, 0, 560,
+ 205, 513, 482, 377, 312, 313, 204, 0, 437, 255,
+ 280, 245, 398, 510, 511, 243, 561, 218, 536, 210,
+ 0, 535, 391, 505, 514, 378, 367, 209, 512, 376,
+ 366, 320, 339, 340, 267, 293, 429, 359, 430, 292,
+ 294, 387, 386, 388, 198, 524, 0, 199, 0, 478,
+ 525, 562, 224, 225, 227, 0, 266, 270, 278, 281,
+ 289, 290, 299, 351, 402, 428, 424, 433, 0, 500,
+ 518, 530, 540, 546, 547, 549, 550, 551, 552, 553,
+ 555, 554, 390, 297, 474, 319, 357, 0, 0, 408,
+ 452, 230, 522, 475, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 563, 564, 565, 566, 567,
+ 568, 569, 570, 571, 572, 573, 574, 575, 576, 577,
+ 578, 579, 580, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 581, 368, 465, 519, 321, 333, 336, 326,
+ 345, 0, 346, 322, 323, 328, 330, 331, 332, 337,
+ 338, 342, 348, 238, 201, 374, 382, 499, 298, 206,
+ 207, 208, 492, 493, 494, 495, 533, 534, 538, 442,
+ 443, 444, 445, 279, 528, 295, 448, 447, 317, 318,
+ 363, 431, 0, 190, 211, 352, 0, 434, 275, 559,
+ 532, 527, 197, 213, 0, 249, 0, 0, 0, 0,
+ 0, 0, 2064, 0, 0, 2063, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 192, 193, 200,
+ 212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
+ 304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
+ 393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
+ 418, 419, 420, 421, 423, 425, 435, 440, 454, 455,
+ 456, 457, 458, 461, 462, 467, 468, 469, 470, 471,
+ 479, 480, 484, 507, 509, 521, 539, 544, 460, 287,
+ 288, 426, 427, 300, 301, 556, 557, 286, 516, 545,
+ 0, 0, 362, 0, 0, 365, 268, 291, 306, 0,
+ 531, 481, 217, 446, 277, 240, 0, 0, 202, 236,
+ 220, 246, 261, 264, 310, 375, 383, 412, 417, 283,
+ 258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
+ 253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 0, 0, 0, 0, 0, 0, 0, 257,
+ 399, 0, 0, 0, 2014, 0, 0, 0, 0, 257,
0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 89, 0, 1497, 0, 627,
+ 195, 396, 477, 273, 0, 0, 0, 0, 1697, 188,
0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
@@ -3868,7 +3983,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 349, 0, 316, 191, 215,
0, 0, 395, 441, 453, 0, 0, 0, 242, 0,
451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
- 0, 0, 450, 356, 506, 432, 517, 542, 543, 250,
+ 0, 2012, 450, 356, 506, 432, 517, 542, 543, 250,
389, 529, 490, 537, 558, 216, 247, 403, 483, 523,
473, 381, 502, 503, 315, 472, 282, 194, 353, 548,
214, 459, 355, 232, 221, 508, 526, 276, 436, 203,
@@ -3891,7 +4006,7 @@ var yyAct = [...]int{
238, 201, 374, 382, 499, 298, 206, 207, 208, 492,
493, 494, 495, 533, 534, 538, 442, 443, 444, 445,
279, 528, 295, 448, 447, 317, 318, 363, 431, 0,
- 190, 211, 352, 88, 434, 275, 559, 532, 527, 197,
+ 190, 211, 352, 0, 434, 275, 559, 532, 527, 197,
213, 0, 249, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 192, 193, 200, 212, 222, 226,
@@ -3914,14 +4029,14 @@ var yyAct = [...]int{
411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 89, 0, 0, 0, 188, 0, 0, 0,
+ 273, 0, 0, 0, 0, 0, 627, 0, 0, 0,
0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
259, 251, 501, 0, 0, 0, 0, 0, 0, 0,
219, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 262, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 927, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3930,7 +4045,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
- 0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
+ 0, 0, 349, 933, 316, 191, 215, 931, 0, 395,
441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
356, 506, 432, 517, 542, 543, 250, 389, 529, 490,
@@ -3957,7 +4072,7 @@ var yyAct = [...]int{
533, 534, 538, 442, 443, 444, 445, 279, 528, 295,
448, 447, 317, 318, 363, 431, 0, 190, 211, 352,
0, 434, 275, 559, 532, 527, 197, 213, 0, 249,
- 0, 0, 0, 0, 0, 0, 2064, 0, 0, 2063,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
@@ -3997,7 +4112,7 @@ var yyAct = [...]int{
0, 541, 0, 0, 0, 0, 0, 0, 0, 349,
0, 316, 191, 215, 0, 0, 395, 441, 453, 0,
0, 0, 242, 0, 451, 409, 520, 223, 271, 438,
- 415, 449, 422, 274, 0, 2012, 450, 356, 506, 432,
+ 415, 449, 422, 274, 0, 0, 450, 356, 506, 432,
517, 542, 543, 250, 389, 529, 490, 537, 558, 216,
247, 403, 483, 523, 473, 381, 502, 503, 315, 472,
282, 194, 353, 548, 214, 459, 355, 232, 221, 508,
@@ -4043,14 +4158,14 @@ var yyAct = [...]int{
256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
- 311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
+ 311, 195, 396, 477, 273, 0, 0, 0, 1497, 0,
627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
0, 0, 0, 0, 219, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 262,
- 0, 0, 0, 0, 0, 0, 0, 0, 927, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -4059,8 +4174,8 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
- 0, 0, 0, 0, 0, 0, 349, 933, 316, 191,
- 215, 931, 0, 395, 441, 453, 0, 0, 0, 242,
+ 0, 0, 3294, 0, 0, 0, 349, 0, 316, 191,
+ 215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
250, 389, 529, 490, 537, 558, 216, 247, 403, 483,
@@ -4102,13 +4217,13 @@ var yyAct = [...]int{
439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
380, 0, 360, 497, 498, 302, 496, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 0, 0, 2014, 0, 0, 0, 0, 257, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 257, 0, 0,
0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 1697, 188, 0, 0,
+ 477, 273, 0, 0, 0, 0, 1847, 627, 0, 0,
0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
@@ -4117,7 +4232,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 262, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1848, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -4173,7 +4288,7 @@ var yyAct = [...]int{
405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 1497, 0, 627, 0, 0, 0, 0, 0,
+ 0, 0, 0, 2415, 627, 0, 0, 0, 0, 0,
0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
@@ -4182,13 +4297,13 @@ var yyAct = [...]int{
0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2416, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 3294, 0, 0, 0,
+ 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
@@ -4238,15 +4353,15 @@ var yyAct = [...]int{
0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 1847, 627, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
+ 0, 627, 0, 0, 0, 0, 2400, 0, 0, 0,
+ 0, 228, 0, 0, 235, 2401, 0, 0, 335, 344,
343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1848, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -4296,13 +4411,13 @@ var yyAct = [...]int{
234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 257, 1539,
0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 2415, 627, 0,
+ 396, 477, 273, 0, 0, 0, 0, 1538, 627, 0,
0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
@@ -4311,7 +4426,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 2416, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -4367,8 +4482,8 @@ var yyAct = [...]int{
486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 0, 0, 0, 0, 627, 0, 0, 0, 0,
- 2400, 0, 0, 0, 0, 228, 0, 0, 235, 2401,
+ 0, 0, 0, 0, 0, 629, 630, 631, 0, 0,
+ 0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
251, 501, 0, 0, 0, 0, 0, 0, 0, 219,
@@ -4426,13 +4541,13 @@ var yyAct = [...]int{
489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
- 0, 0, 257, 1539, 0, 0, 0, 350, 254, 0,
+ 0, 0, 257, 0, 0, 0, 0, 350, 254, 0,
0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
- 0, 1538, 627, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 627, 0, 0, 0, 0, 0, 0, 0,
0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
@@ -4447,7 +4562,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 284, 0, 385, 244, 0, 0, 0, 0,
- 541, 0, 0, 0, 0, 0, 0, 0, 349, 0,
+ 541, 0, 0, 0, 3419, 0, 0, 0, 349, 0,
316, 191, 215, 0, 0, 395, 441, 453, 0, 0,
0, 242, 0, 451, 409, 520, 223, 271, 438, 415,
449, 422, 274, 0, 0, 450, 356, 506, 432, 517,
@@ -4496,8 +4611,8 @@ var yyAct = [...]int{
239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 0, 0, 0, 0, 629,
- 630, 631, 0, 0, 0, 0, 0, 0, 0, 228,
+ 195, 396, 477, 273, 0, 0, 0, 0, 1697, 188,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
0, 252, 307, 259, 251, 501, 0, 0, 0, 0,
@@ -4576,7 +4691,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
- 0, 0, 0, 0, 541, 0, 0, 0, 3418, 0,
+ 0, 0, 0, 0, 541, 0, 0, 0, 3294, 0,
0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
223, 271, 438, 415, 449, 422, 274, 0, 0, 450,
@@ -4625,8 +4740,8 @@ var yyAct = [...]int{
504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
0, 354, 0, 0, 476, 384, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 1697, 188, 0, 0, 0, 0, 0, 0,
+ 0, 309, 237, 311, 195, 396, 477, 273, 0, 89,
+ 0, 0, 0, 627, 0, 0, 0, 0, 0, 0,
0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
0, 0, 0, 0, 0, 252, 307, 259, 251, 501,
@@ -4683,7 +4798,7 @@ var yyAct = [...]int{
236, 220, 246, 261, 264, 310, 375, 383, 412, 417,
283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 2065, 0, 0, 0, 0, 0,
0, 399, 0, 0, 0, 0, 0, 0, 0, 0,
257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
@@ -4691,7 +4806,7 @@ var yyAct = [...]int{
0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
- 627, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 188, 0, 0, 0, 0, 0, 0, 0, 0, 0,
228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
@@ -4706,7 +4821,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
- 0, 0, 3294, 0, 0, 0, 349, 0, 316, 191,
+ 0, 0, 0, 0, 0, 0, 349, 0, 316, 191,
215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
274, 0, 0, 450, 356, 506, 432, 517, 542, 543,
@@ -4755,7 +4870,7 @@ var yyAct = [...]int{
369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 89, 0, 0, 0, 627, 0, 0,
+ 477, 273, 0, 0, 0, 0, 1520, 627, 0, 0,
0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
@@ -4812,7 +4927,7 @@ var yyAct = [...]int{
240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 0, 0, 0, 2065, 0,
+ 497, 498, 302, 496, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
@@ -4861,7 +4976,7 @@ var yyAct = [...]int{
332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 0, 434,
+ 317, 318, 363, 431, 0, 190, 211, 352, 1802, 434,
275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
@@ -4885,7 +5000,7 @@ var yyAct = [...]int{
0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 1520, 627, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1794, 627, 0, 0, 0, 0, 0, 0, 0, 0,
0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
@@ -4943,7 +5058,7 @@ var yyAct = [...]int{
234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
+ 0, 1664, 0, 0, 0, 0, 0, 0, 257, 0,
0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
@@ -4958,7 +5073,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1327, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -5007,14 +5122,14 @@ var yyAct = [...]int{
310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
464, 487, 488, 489, 491, 379, 253, 416, 380, 0,
360, 497, 498, 302, 496, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 399, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 399, 0, 1662, 0,
0, 0, 0, 0, 0, 257, 0, 0, 0, 0,
350, 254, 0, 0, 413, 0, 196, 0, 466, 241,
361, 358, 504, 269, 260, 256, 239, 303, 369, 411,
486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 0, 0, 0, 0, 188, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 627, 0, 0, 0, 0,
0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
@@ -5055,7 +5170,7 @@ var yyAct = [...]int{
331, 332, 337, 338, 342, 348, 238, 201, 374, 382,
499, 298, 206, 207, 208, 492, 493, 494, 495, 533,
534, 538, 442, 443, 444, 445, 279, 528, 295, 448,
- 447, 317, 318, 363, 431, 0, 190, 211, 352, 1802,
+ 447, 317, 318, 363, 431, 0, 190, 211, 352, 0,
434, 275, 559, 532, 527, 197, 213, 0, 249, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -5072,14 +5187,14 @@ var yyAct = [...]int{
412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 399, 0, 1660, 0, 0, 0, 0,
0, 0, 257, 0, 0, 0, 0, 350, 254, 0,
0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
309, 237, 311, 195, 396, 477, 273, 0, 0, 0,
- 0, 1794, 627, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 627, 0, 0, 0, 0, 0, 0, 0,
0, 0, 228, 0, 0, 235, 0, 0, 0, 335,
344, 343, 324, 325, 327, 329, 334, 341, 347, 0,
0, 0, 0, 0, 252, 307, 259, 251, 501, 0,
@@ -5137,7 +5252,7 @@ var yyAct = [...]int{
258, 234, 439, 231, 464, 487, 488, 489, 491, 379,
253, 416, 380, 0, 360, 497, 498, 302, 496, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 399, 0, 1664, 0, 0, 0, 0, 0, 0, 257,
+ 399, 0, 1658, 0, 0, 0, 0, 0, 0, 257,
0, 0, 0, 0, 350, 254, 0, 0, 413, 0,
196, 0, 466, 241, 361, 358, 504, 269, 260, 256,
239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
@@ -5201,7 +5316,7 @@ var yyAct = [...]int{
264, 310, 375, 383, 412, 417, 283, 258, 234, 439,
231, 464, 487, 488, 489, 491, 379, 253, 416, 380,
0, 360, 497, 498, 302, 496, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 399, 0, 1662,
+ 0, 0, 0, 0, 0, 0, 0, 399, 0, 1656,
0, 0, 0, 0, 0, 0, 257, 0, 0, 0,
0, 350, 254, 0, 0, 413, 0, 196, 0, 466,
241, 361, 358, 504, 269, 260, 256, 239, 303, 369,
@@ -5266,7 +5381,7 @@ var yyAct = [...]int{
383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
488, 489, 491, 379, 253, 416, 380, 0, 360, 497,
498, 302, 496, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 399, 0, 1660, 0, 0, 0,
+ 0, 0, 0, 0, 399, 0, 1652, 0, 0, 0,
0, 0, 0, 257, 0, 0, 0, 0, 350, 254,
0, 0, 413, 0, 196, 0, 466, 241, 361, 358,
504, 269, 260, 256, 239, 303, 369, 411, 486, 405,
@@ -5331,7 +5446,7 @@ var yyAct = [...]int{
283, 258, 234, 439, 231, 464, 487, 488, 489, 491,
379, 253, 416, 380, 0, 360, 497, 498, 302, 496,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 399, 0, 1658, 0, 0, 0, 0, 0, 0,
+ 0, 399, 0, 1650, 0, 0, 0, 0, 0, 0,
257, 0, 0, 0, 0, 350, 254, 0, 0, 413,
0, 196, 0, 466, 241, 361, 358, 504, 269, 260,
256, 239, 303, 369, 411, 486, 405, 0, 354, 0,
@@ -5396,7 +5511,7 @@ var yyAct = [...]int{
439, 231, 464, 487, 488, 489, 491, 379, 253, 416,
380, 0, 360, 497, 498, 302, 496, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 399, 0,
- 1656, 0, 0, 0, 0, 0, 0, 257, 0, 0,
+ 1648, 0, 0, 0, 0, 0, 0, 257, 0, 0,
0, 0, 350, 254, 0, 0, 413, 0, 196, 0,
466, 241, 361, 358, 504, 269, 260, 256, 239, 303,
369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
@@ -5460,14 +5575,14 @@ var yyAct = [...]int{
375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
497, 498, 302, 496, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 1652, 0, 0,
+ 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 627, 0, 0, 0, 0, 0,
+ 1623, 0, 0, 0, 627, 0, 0, 0, 0, 0,
0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
@@ -5525,14 +5640,14 @@ var yyAct = [...]int{
417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 1650, 0, 0, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
+ 0, 0, 399, 0, 0, 0, 0, 0, 0, 0,
+ 1524, 257, 0, 0, 0, 0, 350, 254, 0, 0,
413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 0, 627, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 188, 0, 0, 0, 0, 0, 0, 0, 0,
0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
@@ -5590,13 +5705,13 @@ var yyAct = [...]int{
234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 1648, 0, 0, 0, 0, 0, 0, 257, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 0, 627, 0,
+ 396, 477, 273, 0, 89, 0, 0, 0, 800, 0,
0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
@@ -5661,7 +5776,7 @@ var yyAct = [...]int{
486, 405, 0, 354, 0, 0, 476, 384, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 309, 237, 311, 195, 396, 477, 273,
- 0, 1623, 0, 0, 0, 627, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 188, 0, 0, 0, 0,
0, 0, 0, 0, 0, 228, 0, 0, 235, 0,
0, 0, 335, 344, 343, 324, 325, 327, 329, 334,
341, 347, 0, 0, 0, 0, 0, 252, 307, 259,
@@ -5675,7 +5790,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 284, 0, 385, 244, 0,
+ 0, 0, 0, 1277, 0, 284, 0, 385, 244, 0,
0, 0, 0, 541, 0, 0, 0, 0, 0, 0,
0, 349, 0, 316, 191, 215, 0, 0, 395, 441,
453, 0, 0, 0, 242, 0, 451, 409, 520, 223,
@@ -5713,14 +5828,14 @@ var yyAct = [...]int{
440, 454, 455, 456, 457, 458, 461, 462, 467, 468,
469, 470, 471, 479, 480, 484, 507, 509, 521, 539,
544, 460, 287, 288, 426, 427, 300, 301, 556, 557,
- 286, 516, 545, 0, 0, 362, 0, 0, 365, 268,
+ 1276, 516, 545, 0, 0, 362, 0, 0, 365, 268,
291, 306, 0, 531, 481, 217, 446, 277, 240, 0,
0, 202, 236, 220, 246, 261, 264, 310, 375, 383,
412, 417, 283, 258, 234, 439, 231, 464, 487, 488,
489, 491, 379, 253, 416, 380, 0, 360, 497, 498,
302, 496, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 399, 0, 0, 0, 0, 0, 0,
- 0, 1524, 257, 0, 0, 0, 0, 350, 254, 0,
+ 0, 0, 257, 0, 0, 0, 0, 350, 254, 0,
0, 413, 0, 196, 0, 466, 241, 361, 358, 504,
269, 260, 256, 239, 303, 369, 411, 486, 405, 0,
354, 0, 0, 476, 384, 0, 0, 0, 0, 0,
@@ -5770,7 +5885,7 @@ var yyAct = [...]int{
363, 431, 0, 190, 211, 352, 0, 434, 275, 559,
532, 527, 197, 213, 0, 249, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 192, 193, 200,
+ 0, 0, 0, 886, 0, 0, 0, 192, 193, 200,
212, 222, 226, 233, 248, 263, 265, 272, 285, 296,
304, 305, 308, 314, 364, 370, 371, 372, 373, 392,
393, 394, 397, 400, 401, 404, 406, 407, 410, 414,
@@ -5790,7 +5905,7 @@ var yyAct = [...]int{
239, 303, 369, 411, 486, 405, 0, 354, 0, 0,
476, 384, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 309, 237, 311,
- 195, 396, 477, 273, 0, 89, 0, 0, 0, 800,
+ 195, 396, 477, 273, 0, 0, 0, 0, 0, 188,
0, 0, 0, 0, 0, 0, 0, 0, 0, 228,
0, 0, 235, 0, 0, 0, 335, 344, 343, 324,
325, 327, 329, 334, 341, 347, 0, 0, 0, 0,
@@ -5805,7 +5920,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 284,
- 0, 385, 244, 0, 0, 0, 0, 541, 0, 0,
+ 0, 385, 244, 0, 0, 584, 0, 541, 0, 0,
0, 0, 0, 0, 0, 349, 0, 316, 191, 215,
0, 0, 395, 441, 453, 0, 0, 0, 242, 0,
451, 409, 520, 223, 271, 438, 415, 449, 422, 274,
@@ -5855,7 +5970,7 @@ var yyAct = [...]int{
411, 486, 405, 0, 354, 0, 0, 476, 384, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 309, 237, 311, 195, 396, 477,
- 273, 0, 0, 0, 0, 0, 188, 0, 0, 0,
+ 273, 0, 0, 0, 0, 0, 627, 0, 0, 0,
0, 0, 0, 0, 0, 0, 228, 0, 0, 235,
0, 0, 0, 335, 344, 343, 324, 325, 327, 329,
334, 341, 347, 0, 0, 0, 0, 0, 252, 307,
@@ -5869,7 +5984,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1277, 0, 284, 0, 385, 244,
+ 0, 0, 0, 0, 0, 0, 284, 0, 385, 244,
0, 0, 0, 0, 541, 0, 0, 0, 0, 0,
0, 0, 349, 0, 316, 191, 215, 0, 0, 395,
441, 453, 0, 0, 0, 242, 0, 451, 409, 520,
@@ -5902,12 +6017,12 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 192, 193, 200, 212, 222, 226, 233, 248, 263,
265, 272, 285, 296, 304, 305, 308, 314, 364, 370,
- 371, 372, 373, 392, 393, 394, 397, 400, 401, 404,
+ 371, 372, 373, 3427, 393, 394, 397, 400, 401, 404,
406, 407, 410, 414, 418, 419, 420, 421, 423, 425,
435, 440, 454, 455, 456, 457, 458, 461, 462, 467,
468, 469, 470, 471, 479, 480, 484, 507, 509, 521,
539, 544, 460, 287, 288, 426, 427, 300, 301, 556,
- 557, 1276, 516, 545, 0, 0, 362, 0, 0, 365,
+ 557, 286, 516, 545, 0, 0, 362, 0, 0, 365,
268, 291, 306, 0, 531, 481, 217, 446, 277, 240,
0, 0, 202, 236, 220, 246, 261, 264, 310, 375,
383, 412, 417, 283, 258, 234, 439, 231, 464, 487,
@@ -5920,7 +6035,7 @@ var yyAct = [...]int{
0, 354, 0, 0, 476, 384, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 309, 237, 311, 195, 396, 477, 273, 0, 0,
- 0, 0, 0, 188, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 627, 0, 0, 0, 0, 0, 0,
0, 0, 0, 228, 0, 0, 235, 0, 0, 0,
335, 344, 343, 324, 325, 327, 329, 334, 341, 347,
0, 0, 0, 0, 0, 252, 307, 259, 251, 501,
@@ -5964,7 +6079,7 @@ var yyAct = [...]int{
318, 363, 431, 0, 190, 211, 352, 0, 434, 275,
559, 532, 527, 197, 213, 0, 249, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 886, 0, 0, 0, 192, 193,
+ 0, 0, 0, 0, 0, 0, 0, 0, 192, 193,
200, 212, 222, 226, 233, 248, 263, 265, 272, 285,
296, 304, 305, 308, 314, 364, 370, 371, 372, 373,
392, 393, 394, 397, 400, 401, 404, 406, 407, 410,
@@ -5985,7 +6100,7 @@ var yyAct = [...]int{
0, 476, 384, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 309, 237,
311, 195, 396, 477, 273, 0, 0, 0, 0, 0,
- 188, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 800, 0, 0, 0, 0, 0, 0, 0, 0, 0,
228, 0, 0, 235, 0, 0, 0, 335, 344, 343,
324, 325, 327, 329, 334, 341, 347, 0, 0, 0,
0, 0, 252, 307, 259, 251, 501, 0, 0, 0,
@@ -5999,7 +6114,7 @@ var yyAct = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 284, 0, 385, 244, 0, 0, 584, 0, 541, 0,
+ 284, 0, 385, 244, 0, 0, 0, 0, 541, 0,
0, 0, 0, 0, 0, 0, 349, 0, 316, 191,
215, 0, 0, 395, 441, 453, 0, 0, 0, 242,
0, 451, 409, 520, 223, 271, 438, 415, 449, 422,
@@ -6049,7 +6164,7 @@ var yyAct = [...]int{
369, 411, 486, 405, 0, 354, 0, 0, 476, 384,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 309, 237, 311, 195, 396,
- 477, 273, 0, 0, 0, 0, 0, 627, 0, 0,
+ 477, 273, 0, 0, 0, 0, 0, 188, 0, 0,
0, 0, 0, 0, 0, 0, 0, 228, 0, 0,
235, 0, 0, 0, 335, 344, 343, 324, 325, 327,
329, 334, 341, 347, 0, 0, 0, 0, 0, 252,
@@ -6083,246 +6198,52 @@ var yyAct = [...]int{
549, 550, 551, 552, 553, 555, 554, 390, 297, 474,
319, 357, 0, 0, 408, 452, 230, 522, 475, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
- 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
- 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
- 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
- 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
- 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
- 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
- 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
- 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
- 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
- 370, 371, 372, 373, 3426, 393, 394, 397, 400, 401,
- 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
- 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
- 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
- 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
- 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
- 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
- 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
- 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
- 487, 488, 489, 491, 379, 253, 416, 380, 0, 360,
- 497, 498, 302, 496, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 399, 0, 0, 0, 0,
- 0, 0, 0, 0, 257, 0, 0, 0, 0, 350,
- 254, 0, 0, 413, 0, 196, 0, 466, 241, 361,
- 358, 504, 269, 260, 256, 239, 303, 369, 411, 486,
- 405, 0, 354, 0, 0, 476, 384, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 309, 237, 311, 195, 396, 477, 273, 0,
- 0, 0, 0, 0, 627, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 228, 0, 0, 235, 0, 0,
- 0, 335, 344, 343, 324, 325, 327, 329, 334, 341,
- 347, 0, 0, 0, 0, 0, 252, 307, 259, 251,
- 501, 0, 0, 0, 0, 0, 0, 0, 219, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 262, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 284, 0, 385, 244, 0, 0,
- 0, 0, 541, 0, 0, 0, 0, 0, 0, 0,
- 349, 0, 316, 191, 215, 0, 0, 395, 441, 453,
- 0, 0, 0, 242, 0, 451, 409, 520, 223, 271,
- 438, 415, 449, 422, 274, 0, 0, 450, 356, 506,
- 432, 517, 542, 543, 250, 389, 529, 490, 537, 558,
- 216, 247, 403, 483, 523, 473, 381, 502, 503, 315,
- 472, 282, 194, 353, 548, 214, 459, 355, 232, 221,
- 508, 526, 276, 436, 203, 485, 515, 229, 463, 0,
- 0, 560, 205, 513, 482, 377, 312, 313, 204, 0,
- 437, 255, 280, 245, 398, 510, 511, 243, 561, 218,
- 536, 210, 0, 535, 391, 505, 514, 378, 367, 209,
- 512, 376, 366, 320, 339, 340, 267, 293, 429, 359,
- 430, 292, 294, 387, 386, 388, 198, 524, 0, 199,
- 0, 478, 525, 562, 224, 225, 227, 0, 266, 270,
- 278, 281, 289, 290, 299, 351, 402, 428, 424, 433,
- 0, 500, 518, 530, 540, 546, 547, 549, 550, 551,
- 552, 553, 555, 554, 390, 297, 474, 319, 357, 0,
- 0, 408, 452, 230, 522, 475, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 563, 564, 565,
- 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
- 576, 577, 578, 579, 580, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 581, 368, 465, 519, 321, 333,
- 336, 326, 345, 0, 346, 322, 323, 328, 330, 331,
- 332, 337, 338, 342, 348, 238, 201, 374, 382, 499,
- 298, 206, 207, 208, 492, 493, 494, 495, 533, 534,
- 538, 442, 443, 444, 445, 279, 528, 295, 448, 447,
- 317, 318, 363, 431, 0, 190, 211, 352, 0, 434,
- 275, 559, 532, 527, 197, 213, 0, 249, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 192,
- 193, 200, 212, 222, 226, 233, 248, 263, 265, 272,
- 285, 296, 304, 305, 308, 314, 364, 370, 371, 372,
- 373, 392, 393, 394, 397, 400, 401, 404, 406, 407,
- 410, 414, 418, 419, 420, 421, 423, 425, 435, 440,
- 454, 455, 456, 457, 458, 461, 462, 467, 468, 469,
- 470, 471, 479, 480, 484, 507, 509, 521, 539, 544,
- 460, 287, 288, 426, 427, 300, 301, 556, 557, 286,
- 516, 545, 0, 0, 362, 0, 0, 365, 268, 291,
- 306, 0, 531, 481, 217, 446, 277, 240, 0, 0,
- 202, 236, 220, 246, 261, 264, 310, 375, 383, 412,
- 417, 283, 258, 234, 439, 231, 464, 487, 488, 489,
- 491, 379, 253, 416, 380, 0, 360, 497, 498, 302,
- 496, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 399, 0, 0, 0, 0, 0, 0, 0,
- 0, 257, 0, 0, 0, 0, 350, 254, 0, 0,
- 413, 0, 196, 0, 466, 241, 361, 358, 504, 269,
- 260, 256, 239, 303, 369, 411, 486, 405, 0, 354,
- 0, 0, 476, 384, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 309,
- 237, 311, 195, 396, 477, 273, 0, 0, 0, 0,
- 0, 800, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 228, 0, 0, 235, 0, 0, 0, 335, 344,
- 343, 324, 325, 327, 329, 334, 341, 347, 0, 0,
- 0, 0, 0, 252, 307, 259, 251, 501, 0, 0,
- 0, 0, 0, 0, 0, 219, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 262, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 284, 0, 385, 244, 0, 0, 0, 0, 541,
- 0, 0, 0, 0, 0, 0, 0, 349, 0, 316,
- 191, 215, 0, 0, 395, 441, 453, 0, 0, 0,
- 242, 0, 451, 409, 520, 223, 271, 438, 415, 449,
- 422, 274, 0, 0, 450, 356, 506, 432, 517, 542,
- 543, 250, 389, 529, 490, 537, 558, 216, 247, 403,
- 483, 523, 473, 381, 502, 503, 315, 472, 282, 194,
- 353, 548, 214, 459, 355, 232, 221, 508, 526, 276,
- 436, 203, 485, 515, 229, 463, 0, 0, 560, 205,
- 513, 482, 377, 312, 313, 204, 0, 437, 255, 280,
- 245, 398, 510, 511, 243, 561, 218, 536, 210, 0,
- 535, 391, 505, 514, 378, 367, 209, 512, 376, 366,
- 320, 339, 340, 267, 293, 429, 359, 430, 292, 294,
- 387, 386, 388, 198, 524, 0, 199, 0, 478, 525,
- 562, 224, 225, 227, 0, 266, 270, 278, 281, 289,
- 290, 299, 351, 402, 428, 424, 433, 0, 500, 518,
- 530, 540, 546, 547, 549, 550, 551, 552, 553, 555,
- 554, 390, 297, 474, 319, 357, 0, 0, 408, 452,
- 230, 522, 475, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 563, 564, 565, 566, 567, 568,
- 569, 570, 571, 572, 573, 574, 575, 576, 577, 578,
- 579, 580, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 581, 368, 465, 519, 321, 333, 336, 326, 345,
- 0, 346, 322, 323, 328, 330, 331, 332, 337, 338,
- 342, 348, 238, 201, 374, 382, 499, 298, 206, 207,
- 208, 492, 493, 494, 495, 533, 534, 538, 442, 443,
- 444, 445, 279, 528, 295, 448, 447, 317, 318, 363,
- 431, 0, 190, 211, 352, 0, 434, 275, 559, 532,
- 527, 197, 213, 0, 249, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 192, 193, 200, 212,
- 222, 226, 233, 248, 263, 265, 272, 285, 296, 304,
- 305, 308, 314, 364, 370, 371, 372, 373, 392, 393,
- 394, 397, 400, 401, 404, 406, 407, 410, 414, 418,
- 419, 420, 421, 423, 425, 435, 440, 454, 455, 456,
- 457, 458, 461, 462, 467, 468, 469, 470, 471, 479,
- 480, 484, 507, 509, 521, 539, 544, 460, 287, 288,
- 426, 427, 300, 301, 556, 557, 286, 516, 545, 0,
- 0, 362, 0, 0, 365, 268, 291, 306, 0, 531,
- 481, 217, 446, 277, 240, 0, 0, 202, 236, 220,
- 246, 261, 264, 310, 375, 383, 412, 417, 283, 258,
- 234, 439, 231, 464, 487, 488, 489, 491, 379, 253,
- 416, 380, 0, 360, 497, 498, 302, 496, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 399,
- 0, 0, 0, 0, 0, 0, 0, 0, 257, 0,
- 0, 0, 0, 350, 254, 0, 0, 413, 0, 196,
- 0, 466, 241, 361, 358, 504, 269, 260, 256, 239,
- 303, 369, 411, 486, 405, 0, 354, 0, 0, 476,
- 384, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 309, 237, 311, 195,
- 396, 477, 273, 0, 0, 0, 0, 0, 188, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 228, 0,
- 0, 235, 0, 0, 0, 335, 344, 343, 324, 325,
- 327, 329, 334, 341, 347, 0, 0, 0, 0, 0,
- 252, 307, 259, 251, 501, 0, 0, 0, 0, 0,
- 0, 0, 219, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 262, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 284, 0,
- 385, 244, 0, 0, 0, 0, 541, 0, 0, 0,
- 0, 0, 0, 0, 349, 0, 316, 191, 215, 0,
- 0, 395, 441, 453, 0, 0, 0, 242, 0, 451,
- 409, 520, 223, 271, 438, 415, 449, 422, 274, 0,
- 0, 450, 356, 506, 432, 517, 542, 543, 250, 389,
- 529, 490, 537, 558, 216, 247, 403, 483, 523, 473,
- 381, 502, 503, 315, 472, 282, 194, 353, 548, 214,
- 459, 355, 232, 221, 508, 526, 276, 436, 203, 485,
- 515, 229, 463, 0, 0, 560, 205, 513, 482, 377,
- 312, 313, 204, 0, 437, 255, 280, 245, 398, 510,
- 511, 243, 561, 218, 536, 210, 0, 535, 391, 505,
- 514, 378, 367, 209, 512, 376, 366, 320, 339, 340,
- 267, 293, 429, 359, 430, 292, 294, 387, 386, 388,
- 198, 524, 0, 199, 0, 478, 525, 562, 224, 225,
- 227, 0, 266, 270, 278, 281, 289, 290, 299, 351,
- 402, 428, 424, 433, 0, 500, 518, 530, 540, 546,
- 547, 549, 550, 551, 552, 553, 555, 554, 390, 297,
- 474, 319, 357, 0, 0, 408, 452, 230, 522, 475,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 563, 564, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 574, 575, 576, 577, 578, 579, 580, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 581, 368,
- 465, 519, 321, 333, 336, 326, 345, 0, 346, 322,
- 323, 328, 330, 331, 332, 337, 338, 342, 348, 238,
- 201, 374, 382, 499, 298, 206, 207, 208, 492, 493,
- 494, 495, 533, 534, 538, 442, 443, 444, 445, 279,
- 528, 295, 448, 447, 317, 318, 363, 431, 0, 190,
- 211, 352, 0, 434, 275, 559, 532, 527, 197, 213,
- 0, 249, 0, 0, 0, 0, 0, 0, 0, 0,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572,
+ 573, 574, 575, 576, 577, 578, 579, 580, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 581, 368, 465,
+ 519, 321, 333, 336, 326, 345, 0, 346, 322, 323,
+ 328, 330, 331, 332, 337, 338, 342, 348, 238, 201,
+ 374, 382, 499, 298, 206, 207, 208, 492, 493, 494,
+ 495, 533, 534, 538, 442, 443, 444, 445, 279, 528,
+ 295, 448, 447, 317, 318, 363, 431, 0, 190, 211,
+ 352, 0, 434, 275, 559, 532, 527, 197, 213, 0,
+ 249, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 192, 193, 200, 212, 222, 226, 233,
- 248, 263, 265, 272, 285, 296, 304, 305, 308, 314,
- 364, 370, 371, 372, 373, 392, 393, 394, 397, 400,
- 401, 404, 406, 407, 410, 414, 418, 419, 420, 421,
- 423, 425, 435, 440, 454, 455, 456, 457, 458, 461,
- 462, 467, 468, 469, 470, 471, 479, 480, 484, 507,
- 509, 521, 539, 544, 460, 287, 288, 426, 427, 300,
- 301, 556, 557, 286, 516, 545, 0, 0, 362, 0,
- 0, 365, 268, 291, 306, 0, 531, 481, 217, 446,
- 277, 240, 0, 0, 202, 236, 220, 246, 261, 264,
- 310, 375, 383, 412, 417, 283, 258, 234, 439, 231,
- 464, 487, 488, 489, 491, 379, 253, 416, 0, 0,
- 360, 497, 498, 302,
+ 0, 0, 192, 193, 200, 212, 222, 226, 233, 248,
+ 263, 265, 272, 285, 296, 304, 305, 308, 314, 364,
+ 370, 371, 372, 373, 392, 393, 394, 397, 400, 401,
+ 404, 406, 407, 410, 414, 418, 419, 420, 421, 423,
+ 425, 435, 440, 454, 455, 456, 457, 458, 461, 462,
+ 467, 468, 469, 470, 471, 479, 480, 484, 507, 509,
+ 521, 539, 544, 460, 287, 288, 426, 427, 300, 301,
+ 556, 557, 286, 516, 545, 0, 0, 362, 0, 0,
+ 365, 268, 291, 306, 0, 531, 481, 217, 446, 277,
+ 240, 0, 0, 202, 236, 220, 246, 261, 264, 310,
+ 375, 383, 412, 417, 283, 258, 234, 439, 231, 464,
+ 487, 488, 489, 491, 379, 253, 416, 0, 0, 360,
+ 497, 498, 302,
}
var yyPact = [...]int{
- -1000, -1000, 5263, -1000, -458, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 5826, -1000, -455, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2260, 2300,
- -1000, -1000, -1000, -1000, 2433, -1000, 875, 1965, -1000, 2261,
- 4625, -1000, 47207, 621, -1000, 44619, 620, 630, 29738, -1000,
- 209, -1000, 164, 45913, 202, -1000, -1000, -1000, -292, 19384,
- 2183, 78, 77, 47207, -1000, -1000, -1000, -1000, 2398, 1911,
- -1000, 394, -1000, -1000, -1000, -1000, -1000, -1000, 43972, -1000,
- 1009, -1000, -1000, 2269, 2231, 2444, 787, 2165, -1000, 2326,
- 1911, -1000, 19384, 2387, 2307, 18737, 18737, 569, -1000, -1000,
- 348, -1000, -1000, 25209, 47207, 32326, 586, -1000, 2261, -1000,
- -1000, -1000, 99, -1000, 470, 1831, -1000, 1830, -1000, 675,
- 864, 485, 588, 578, 481, 473, 472, 471, 469, 467,
- 454, 453, 492, -1000, 814, 814, -111, -115, 3182, 567,
- 545, 545, 1011, 585, 2213, 2203, -1000, -1000, 814, 814,
- 814, 479, 814, 814, 814, 814, 383, 382, 814, 814,
- 814, 814, 814, 814, 814, 814, 814, 814, 814, 814,
- 814, 814, 814, 814, 814, 422, 2261, 364, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2148, 2198,
+ -1000, -1000, -1000, -1000, 2370, -1000, 776, 1786, -1000, 2147,
+ 309, -1000, 46536, 572, -1000, 43948, 571, 224, 29714, -1000,
+ 218, -1000, 203, 45242, 212, -1000, -1000, -1000, -305, 18713,
+ 2037, 94, 92, 46536, -1000, -1000, -1000, -1000, 2334, 1745,
+ -1000, 445, -1000, -1000, -1000, -1000, -1000, -1000, 43301, -1000,
+ 932, -1000, -1000, 2159, 2149, 2372, 700, 2100, -1000, 2240,
+ 1745, -1000, 18713, 2313, 2207, 18066, 18066, 500, -1000, -1000,
+ 238, -1000, -1000, 25185, 46536, 32302, 721, -1000, 2147, -1000,
+ -1000, -1000, 159, -1000, 369, 1670, -1000, 1669, -1000, 766,
+ 888, 427, 509, 505, 418, 417, 404, 383, 382, 381,
+ 372, 364, 433, -1000, 724, 724, -120, -121, 280, 553,
+ 490, 490, 935, 521, 2118, 2117, -1000, -1000, 724, 724,
+ 724, 378, 724, 724, 724, 724, 331, 324, 724, 724,
+ 724, 724, 724, 724, 724, 724, 724, 724, 724, 724,
+ 724, 724, 724, 724, 724, 397, 2147, 299, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
@@ -6362,59 +6283,59 @@ var yyPact = [...]int{
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 47207, 374, 47207, -1000, 690, 47207, 955, 955,
- 129, 955, 955, 955, 955, 265, 793, 69, -1000, 256,
- 344, 228, 340, 941, 743, -1000, -1000, 360, 941, 1692,
- -1000, 791, 234, -1000, 955, 955, -1000, 12889, 165, 12889,
- 12889, -1000, 2239, -1000, -1000, -1000, -1000, -1000, 1234, -1000,
- -1000, -1000, -1000, -1000, 583, -1000, -1000, -1000, -1000, 45913,
- 43325, -1000, -1000, 82, -1000, -1000, 1744, 1467, 19384, 1219,
- -1000, 1169, 749, -1000, -1000, -1000, -1000, -1000, 652, -1000,
- 20031, 20031, 20031, 20031, -1000, -1000, 1839, 42678, 1839, 1839,
- 20031, 1839, -1000, 20031, 1839, 1839, 1839, 19384, 1839, 1839,
- 1839, 1839, -1000, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, -1000, -1000, -1000, -1000, 1839, 689, 1839, 1839, 1839,
- 1839, 1839, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 1839, 1839, 1839, 1839, 1839, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21325,
- 1408, 1397, 1380, -1000, 16796, 1839, -1000, -1000, -1000, -1000,
+ -1000, -1000, 46536, 256, 46536, -1000, 629, 46536, 876, 876,
+ 118, 876, 876, 876, 876, 216, 731, 90, -1000, 215,
+ 294, 200, 296, 859, 316, -1000, -1000, 283, 859, 1533,
+ -1000, 706, 197, -1000, 876, 876, -1000, 12218, 152, 12218,
+ 12218, -1000, 2140, -1000, -1000, -1000, -1000, -1000, 1135, -1000,
+ -1000, -1000, -1000, -1000, 518, -1000, -1000, -1000, -1000, 45242,
+ 42654, -1000, -1000, 65, -1000, -1000, 1598, 1041, 18713, 1233,
+ -1000, 1564, 679, -1000, -1000, -1000, -1000, -1000, 596, -1000,
+ 19360, 19360, 19360, 19360, -1000, -1000, 1672, 42007, 1672, 1672,
+ 19360, 1672, -1000, 19360, 1672, 1672, 1672, 18713, 1672, 1672,
+ 1672, 1672, -1000, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, -1000, -1000, -1000, -1000, 1672, 627, 1672, 1672, 1672,
+ 1672, 1672, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 1672, 1672, 1672, 1672, 1672, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21301,
+ 1339, 1337, 1324, -1000, 16125, 1672, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 47207, -1000, 1839, 233,
- 45913, 45913, 425, 2326, 1911, -1000, 2398, 2379, 394, -1000,
- 3108, 1406, 1439, 1334, 1911, 1814, 47207, -1000, 1853, -1000,
- -1000, -1000, -1000, 2096, 1393, 1687, -1000, -1000, -1000, -1000,
- 2027, 19384, -1000, -1000, 2422, -1000, 22620, 688, 2415, 42031,
- -1000, 569, 569, 1827, 428, 57, -1000, -1000, -1000, -1000,
- 841, 29091, -1000, -1000, -1000, -1000, 1732, 47207, -1000, -1000,
- 4853, 1252, -1000, 1963, -1000, 1730, -1000, 1896, 19384, 1929,
- 613, 1252, 597, 596, 595, -1000, -17, -1000, -1000, -1000,
- -1000, -1000, -1000, 814, 814, 814, -1000, 484, 2384, 4625,
- 3608, -1000, -1000, -1000, 41384, 1961, 1252, -1000, 1956, -1000,
- 927, 663, 722, 722, 1252, -1000, -1000, 46560, 1252, 926,
- 923, 1252, 1252, 45913, 45913, -1000, 40737, -1000, 40090, 39443,
- 1180, 45913, 38796, 38149, 37502, 36855, 36208, -1000, 2054, -1000,
- 1942, -1000, -1000, -1000, 46560, 1252, 1252, 46560, 45913, 46560,
- 47207, 1252, -1000, -1000, 385, -1000, -1000, 1178, 1165, 1163,
- 814, 814, 1162, 1678, 1670, 1665, 814, 814, 1161, 1663,
- 31032, 1656, 362, 1158, 1154, 1150, 1152, 1647, 199, 1580,
- 1128, 1127, 1149, 45913, 1952, 47207, -1000, 327, 824, 561,
- 837, 2261, 2178, 1825, 580, 608, 1252, 560, 560, 45913,
- -1000, 14848, -1000, -1000, 1578, 19384, -1000, 942, 941, 941,
- -1000, -1000, -1000, -1000, -1000, -1000, 955, 47207, 942, -1000,
- -1000, -1000, 941, 955, 47207, 955, 955, 955, 955, 941,
- 941, 941, 955, 47207, 47207, 47207, 47207, 47207, 47207, 47207,
- 47207, 47207, 12889, 791, 955, -300, -1000, 1572, -1000, 2073,
+ -1000, -1000, -1000, -1000, -1000, -1000, 46536, -1000, 1672, 232,
+ 45242, 45242, 423, 2240, 1745, -1000, 2334, 2254, 445, -1000,
+ 2376, 1409, 1473, 1195, 1745, 1651, 46536, -1000, 1700, -1000,
+ -1000, -1000, -1000, 1936, 1283, 1514, -1000, -1000, -1000, -1000,
+ 2028, 18713, -1000, -1000, 2350, -1000, 22596, 624, 2348, 41360,
+ -1000, 500, 500, 1665, 439, 40, -1000, -1000, -1000, -1000,
+ 753, 29067, -1000, -1000, -1000, -1000, 1585, 46536, -1000, -1000,
+ 5079, 1129, -1000, 1776, -1000, 1538, -1000, 1739, 18713, 1747,
+ 568, 1129, 564, 549, 548, -1000, -2, -1000, -1000, -1000,
+ -1000, -1000, -1000, 724, 724, 724, -1000, 376, 2312, 309,
+ 5021, -1000, -1000, -1000, 40713, 1775, 1129, -1000, 1772, -1000,
+ 856, 597, 642, 642, 1129, -1000, -1000, 45889, 1129, 853,
+ 847, 1129, 1129, 45242, 45242, -1000, 40066, -1000, 39419, 38772,
+ 1110, 45242, 38125, 37478, 36831, 36184, 35537, -1000, 2078, -1000,
+ 1785, -1000, -1000, -1000, 45889, 1129, 1129, 45889, 45242, 45889,
+ 46536, 1129, -1000, -1000, 385, -1000, -1000, 1108, 1107, 1102,
+ 724, 724, 1099, 1513, 1509, 1507, 724, 724, 1094, 1506,
+ 31008, 1505, 290, 1091, 1085, 1081, 1128, 1504, 188, 1490,
+ 1043, 1015, 1075, 45242, 1767, 46536, -1000, 282, 764, 695,
+ 749, 2147, 2022, 1663, 511, 567, 1129, 493, 493, 45242,
+ -1000, 12871, -1000, -1000, 1481, 18713, -1000, 875, 859, 859,
+ -1000, -1000, -1000, -1000, -1000, -1000, 876, 46536, 875, -1000,
+ -1000, -1000, 859, 876, 46536, 876, 876, 876, 876, 859,
+ 859, 859, 876, 46536, 46536, 46536, 46536, 46536, 46536, 46536,
+ 46536, 46536, 12218, 706, 876, -313, -1000, 1469, -1000, 1892,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
@@ -6430,281 +6351,281 @@ var yyPact = [...]int{
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 12889, 12889, -1000, -1000,
- -1000, -1000, 200, -1000, 35561, 409, 835, -1000, 1824, 34914,
- -1000, -333, -340, -342, -359, -1000, -1000, -1000, -360, -362,
- -1000, -1000, -1000, 19384, 19384, 19384, 19384, -147, -1000, 936,
- 20031, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 171, 965,
- 20031, 20031, 20031, 20031, 20031, 20031, 20031, 20031, 20031, 20031,
- 20031, 20031, 20031, 20031, 20031, -1000, -1000, 27150, 6253, 6253,
- 749, 749, 749, 749, -1000, -82, 1823, 46560, -1000, -1000,
- -1000, 684, 19384, 19384, 749, -1000, 1252, 16796, 34267, 18737,
- 18737, 19384, 856, 1467, 46560, 19384, -1000, 1334, -1000, -1000,
- -1000, 1142, -1000, 954, 2245, 2245, 2245, 2245, 19384, 19384,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 2245, 45913,
- 45913, 858, 19384, 19384, 19384, 19384, 19384, 19384, 15501, 19384,
- 19384, 20031, 19384, 19384, 19384, 1334, 19384, 19384, 19384, 19384,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384,
- 19384, 19384, 19384, 1334, 19384, 1372, 19384, 19384, 18737, 14195,
- 18737, 18737, 18737, 18737, 18737, -1000, -1000, -1000, -1000, -1000,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 1334, 19384,
- 19384, 19384, 19384, 19384, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, 1591, 1333, 1404, 19384, -1000, 1817,
- -1000, -84, 24562, 19384, 1557, 2413, 2002, 45913, -1000, -1000,
- -1000, 2326, -1000, 2326, 1591, 3101, 2099, 18737, -1000, -1000,
- 3101, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1646,
- -1000, 47207, 1814, 2287, 45913, 2085, 1555, 514, -1000, 19384,
- 19384, 1810, -1000, 1364, 47207, -1000, -147, -1000, 33620, -1000,
- -1000, 12236, 47207, 421, 47207, -1000, 23915, 32973, 281, 57,
- -1000, 1780, -1000, 49, 40, 16148, 748, -1000, -1000, -1000,
- 3182, 20678, 1739, 748, 130, -1000, -1000, -1000, 1896, -1000,
- 1896, 1896, 1896, 1896, 514, 514, 514, 514, -1000, -1000,
- -1000, -1000, -1000, 1950, 1946, -1000, 1896, 1896, 1896, 1896,
+ -1000, -1000, -1000, -1000, -1000, -1000, 12218, 12218, -1000, -1000,
+ -1000, -1000, 208, -1000, 34890, 396, 748, -1000, 1661, 34243,
+ -1000, -316, -322, -323, -330, -1000, -1000, -1000, -351, -364,
+ -1000, -1000, -1000, 18713, 18713, 18713, 18713, -152, -1000, 959,
+ 19360, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 139, 907,
+ 19360, 19360, 19360, 19360, 19360, 19360, 19360, 19360, 19360, 19360,
+ 19360, 19360, 19360, 19360, 19360, -1000, -1000, 27126, 7353, 7353,
+ 679, 679, 679, 679, -1000, -65, 1659, 45889, -1000, -1000,
+ -1000, 622, 18713, 18713, 679, -1000, 1129, 16125, 20007, 18066,
+ 18066, 18713, 759, 1041, 45889, 18713, -1000, 1195, -1000, -1000,
+ -1000, 1080, -1000, 854, 2127, 2127, 2127, 2127, 18713, 18713,
+ 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 2127, 45242,
+ 45242, 862, 18713, 18713, 18713, 18713, 18713, 18713, 14830, 18713,
+ 18713, 19360, 18713, 18713, 18713, 1195, 18713, 18713, 18713, 18713,
+ 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713,
+ 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713,
+ 18713, 18713, 18713, 1195, 18713, 1014, 18713, 18713, 18066, 14177,
+ 18066, 18066, 18066, 18066, 18066, -1000, -1000, -1000, -1000, -1000,
+ 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 1195, 18713,
+ 18713, 18713, 18713, 18713, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 1415, 1231, 1242, 18713, -1000, 1652,
+ -1000, -155, 24538, 18713, 1447, 2346, 1826, 45242, -1000, -1000,
+ -1000, 2240, -1000, 2240, 1415, 2170, 1963, 18066, -1000, -1000,
+ 2170, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1291,
+ -1000, 46536, 1651, 2204, 45242, 1928, 1445, 368, -1000, 18713,
+ 18713, 1650, -1000, 1120, 46536, -1000, -152, -1000, 33596, -1000,
+ -1000, 11565, 46536, 357, 46536, -1000, 23891, 32949, 374, 40,
+ -1000, 1624, -1000, 53, 61, 15477, 662, -1000, -1000, -1000,
+ 280, 20654, 1596, 662, 121, -1000, -1000, -1000, 1739, -1000,
+ 1739, 1739, 1739, 1739, 368, 368, 368, 368, -1000, -1000,
+ -1000, -1000, -1000, 1761, 1760, -1000, 1739, 1739, 1739, 1739,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1943, 1943, 1943,
- 1906, 1906, 535, -1000, 19384, 304, 32326, 2267, 1139, 2036,
- 327, 564, 1995, 1252, 1252, 1252, 564, -1000, 1329, 1327,
- 1322, -1000, -447, 1796, -1000, -1000, 2382, -1000, -1000, 943,
- 957, 949, 984, 45913, 247, 408, -1000, 515, -1000, 32326,
- 1252, 916, 722, 1252, -1000, 1252, -1000, -1000, -1000, -1000,
- -1000, 1252, -1000, -1000, 1795, -1000, 1822, 1003, 945, 979,
- 938, 1795, -1000, -1000, -88, 1795, -1000, 1795, -1000, 1795,
- -1000, 1795, -1000, 1795, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 831, 227, -208, 45913, 247, 579, -1000,
- 573, 27150, -1000, -1000, -1000, 27150, 27150, -1000, -1000, -1000,
- -1000, 1538, 1535, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1759, 1759, 1759,
+ 1740, 1740, 478, -1000, 18713, 301, 32302, 2173, 1068, 1274,
+ 282, 498, 1796, 1129, 1129, 1129, 498, -1000, 1173, 1161,
+ 1155, -1000, -445, 1633, -1000, -1000, 2311, -1000, -1000, 948,
+ 886, 880, 991, 45242, 248, 355, -1000, 467, -1000, 32302,
+ 1129, 844, 642, 1129, -1000, 1129, -1000, -1000, -1000, -1000,
+ -1000, 1129, -1000, -1000, 1631, -1000, 1654, 917, 871, 913,
+ 868, 1631, -1000, -1000, -91, 1631, -1000, 1631, -1000, 1631,
+ -1000, 1631, -1000, 1631, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 782, 229, -236, 45242, 248, 510, -1000,
+ 503, 27126, -1000, -1000, -1000, 27126, 27126, -1000, -1000, -1000,
+ -1000, 1437, 1422, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -418, 47207, -1000, 264, 832, 387, 437, 436,
- 47207, 429, 2318, 2316, 2310, 2301, 2294, 355, 376, 47207,
- 47207, 560, 2047, 47207, 2276, 47207, -1000, -1000, -1000, -1000,
- -1000, 1467, 47207, -1000, -1000, 955, 955, -1000, -1000, 47207,
- 955, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 955,
+ -1000, -1000, -431, 46536, -1000, 262, 745, 336, 420, 322,
+ 46536, 345, 2229, 2228, 2224, 2218, 2213, 271, 321, 46536,
+ 46536, 493, 1887, 46536, 2181, 46536, -1000, -1000, -1000, -1000,
+ -1000, 1041, 46536, -1000, -1000, 876, 876, -1000, -1000, 46536,
+ 876, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 876,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 47207, -1000, -1000, -1000, -1000,
- 45913, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -99, 158, 47, 388, -1000, -1000, -1000, -1000, -1000, 2323,
- -1000, 1467, 894, 901, -1000, 1839, -1000, -1000, 1030, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 171, 20031, 20031, 20031,
- 1288, 610, 1636, 1100, 1151, 1135, 1135, 1004, 1004, 753,
- 753, 753, 753, 753, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 1532, -1000, 1839, 46560, 1724, 14195, 1213, 2005,
- 1334, 2956, -1000, 1660, -1000, 1660, 1811, 859, -1000, 19384,
- 1334, 2916, -1000, -1000, 1334, 1334, 1334, 19384, -1000, -1000,
- 19384, 19384, 19384, 19384, 2036, 2036, 2036, 2036, 2036, 2036,
- 2036, 2036, 2036, 2036, 19384, 1794, 1786, 2411, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 46536, -1000, -1000, -1000, -1000,
+ 45242, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -93, 44, 67, 371, -1000, -1000, -1000, -1000, -1000, 2234,
+ -1000, 1041, 846, 814, -1000, 1672, -1000, -1000, 922, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, 139, 19360, 19360, 19360,
+ 1384, 533, 1449, 1643, 1103, 1199, 1199, 858, 858, 686,
+ 686, 686, 686, 686, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 1389, -1000, 1672, 45889, 1502, 14177, 1834, 1742,
+ 1195, 3057, -1000, 1495, -1000, 1495, 1521, 800, -1000, 18713,
+ 1195, 3030, -1000, -1000, 1195, 1195, 1195, 18713, -1000, -1000,
+ 18713, 18713, 18713, 18713, 1274, 1274, 1274, 1274, 1274, 1274,
+ 1274, 1274, 1274, 1274, 18713, 1629, 1628, 2342, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1044,
- 2036, 2036, 2036, 2036, 2036, 19384, 1721, -1000, -1000, -1000,
- 1387, 2894, 1238, 2885, 2036, 2036, -1000, 2036, 2857, 2837,
- 1334, 1744, 1334, 1785, -1000, 2813, 2036, 2807, 2802, 2797,
- 2014, 2792, 2776, 2770, 2036, 2036, 2036, 1980, 2764, 2758,
- 2752, 2740, 2728, 2709, 2697, 2690, 2684, 2036, -150, 2036,
- 1334, -1000, -1000, -1000, -1000, -1000, 2679, 1971, 1334, 1784,
- 1839, 676, -1000, -1000, 1660, 1334, 1334, 1660, 1660, 2624,
- 2601, 2582, 2432, 2311, 2303, 2036, 2036, -1000, 2036, 2293,
- 2255, 1958, 1940, 1334, -1000, 1404, 47207, -1000, -284, -1000,
- 24, 797, 1839, -1000, 31032, 1334, -1000, 6098, -1000, 1189,
- -1000, -1000, -1000, -1000, -1000, 28444, 1737, 3101, -1000, -1000,
- 1839, 1655, -1000, -1000, 514, 95, 27797, 735, 735, 137,
- 1467, 1467, 19384, -1000, -1000, -1000, -1000, -1000, -1000, 673,
- 2399, 391, 1839, -1000, 1790, 2514, -1000, -1000, -1000, 2285,
- 21973, -1000, -1000, 1839, 1839, 47207, 1708, 1691, -1000, 670,
- -1000, 1255, 1780, 57, 46, -1000, -1000, -1000, -1000, 1467,
- -1000, 1275, 430, 1726, -1000, 552, -1000, -1000, -1000, -1000,
- 2190, 107, -1000, -1000, -1000, 279, 514, -1000, -1000, -1000,
- -1000, -1000, -1000, 1521, 1521, -1000, -1000, -1000, -1000, -1000,
- 1134, -1000, -1000, -1000, 1133, -1000, -1000, 2114, 2038, 304,
- -1000, -1000, 814, 1517, -1000, -1000, 2197, 814, 814, 45913,
- -1000, -1000, 1709, 2267, 264, 47207, 868, 2046, -1000, 1995,
- 1995, 1995, 47207, -1000, -1000, -1000, -1000, -1000, -1000, -437,
- 63, 384, -1000, -1000, -1000, 309, 45913, 1653, -1000, 249,
- -1000, 1705, -1000, 45913, -1000, 1640, 1930, 1252, 1252, -1000,
- -1000, -1000, 45913, 1839, -1000, -1000, -1000, -1000, 605, 2242,
- 308, -1000, -1000, -183, -1000, -1000, 247, 249, 46560, 1252,
- 748, -1000, -1000, -1000, -1000, -1000, -421, 1638, 589, 255,
- 339, 47207, 47207, 47207, 47207, 47207, 655, -1000, -1000, 68,
- -1000, -1000, 224, -1000, -1000, -1000, -1000, 224, -1000, -1000,
- -1000, -1000, 370, 568, -1000, 47207, 47207, 710, -1000, -1000,
- -1000, 941, -1000, -1000, 941, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 2233, 47207, 37, -383, -1000,
- -379, 19384, -1000, -1000, -1000, -1000, 1217, 609, 1636, 20031,
- 20031, 20031, -1000, -1000, -1000, 884, 884, 27150, -1000, 19384,
- 18737, -1000, -1000, 19384, 19384, 846, -1000, 19384, 1014, -1000,
- 19384, -1000, -1000, -1000, 1404, 2036, 2036, 2036, 2036, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1818,
- 19384, 19384, 19384, 1334, 320, -1000, -1000, -1000, -1000, -1000,
- 2410, -1000, 19384, -1000, 27150, 19384, 19384, 19384, -1000, -1000,
- -1000, 19384, 19384, -1000, -1000, 19384, 19384, -1000, 19384, 19384,
- 19384, -1000, 19384, 19384, 19384, 19384, -1000, -1000, -1000, -1000,
- 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384, 19384,
- -1000, -1000, 32326, 106, -150, 1372, 106, 1372, -1000, 18737,
- 13542, -1000, -1000, -1000, -1000, -1000, 19384, 19384, 19384, 19384,
- 19384, 19384, -1000, -1000, -1000, 19384, 19384, -1000, 19384, -1000,
- 19384, -1000, -1000, -1000, -1000, -1000, 797, -1000, 722, 722,
- 722, 45913, -1000, -1000, -1000, -1000, 1777, -1000, 2314, -1000,
- 2131, 2116, 2409, 2399, -1000, 23915, 3101, -1000, -1000, 45913,
- -274, -1000, 2172, 2187, 735, 735, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 11583, 2326, 19384, 2040, 46560, 159, -1000,
- 23268, 45913, 46560, 23915, 23915, 23915, 23915, 23915, -1000, 2070,
- 2061, -1000, 2115, 2095, 2148, 47207, -1000, 1591, 1633, -1000,
- 19384, 25856, 1751, 23915, -1000, -1000, 23915, 47207, 10930, -1000,
- -1000, 31, 35, -1000, -1000, -1000, -1000, 3182, -1000, -1000,
- 3205, 2284, 2184, -1000, -1000, -1000, -1000, -1000, 1631, -1000,
- 1629, 1771, 1625, 227, -1000, 1898, 2232, 814, 814, -1000,
- 1125, -1000, 1252, 1506, 1504, -1000, -1000, -1000, 587, -1000,
- 2274, 47207, 2039, 2037, 2034, -1000, -445, 1123, 1923, 1887,
- 19384, 1920, 2381, 1767, 45913, -1000, -1000, 46560, -1000, 300,
- -1000, 304, 45913, -1000, -1000, -1000, 408, 47207, -1000, 5931,
- -1000, -1000, -1000, 249, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 47207, 329, -1000, 1908, 1233, -1000, -1000, 1977, -1000,
- -1000, -1000, -1000, 271, 386, 1478, 222, 1448, 222, -1000,
- 47207, 707, 2038, 47207, -1000, -1000, -1000, 955, 955, -1000,
- -1000, 2223, -1000, 1252, 2036, 20031, 20031, -1000, 749, 322,
- -128, 1896, 1896, -1000, 1896, 1906, -1000, 1896, 181, 1896,
- 179, 1896, -1000, -1000, 1334, 1334, 1404, -1000, 1935, 993,
- -1000, 1467, 19384, 2236, -1000, -1000, -1000, -1000, -1000, -23,
- 2210, 2194, 2036, -1000, 1890, 1889, 19384, 2036, 1334, 1924,
- 2036, 2036, 2036, 2036, -1000, 1467, 1404, 2181, 1404, 2036,
- 2036, 2175, 333, 2036, 1621, 1621, 1621, 1621, 1621, 1404,
- 1404, 1404, 1404, 45913, -1000, -150, -1000, -1000, -201, -202,
- -1000, 1334, -150, 1770, 1334, -1000, 1912, 1903, 2139, 1893,
- 2036, 2104, 2036, 2036, 2036, 1807, -1000, 2304, 2304, 2304,
- 1576, 1189, 47207, -1000, -1000, -1000, -1000, 2399, 2394, 1768,
- -1000, -1000, 95, 427, -1000, 2149, 2187, -1000, 2360, 2156,
- 2356, -1000, -1000, -1000, -1000, -1000, 1467, -1000, 2246, 1747,
- -1000, 819, 1753, -1000, -1000, 18090, 1613, 2103, 668, 1576,
- 1820, 2514, 1993, 2030, 2315, -1000, -1000, -1000, -1000, 2060,
- -1000, 2059, -1000, -1000, 1853, -1000, 2010, 421, 23915, 1734,
- 1734, -1000, 666, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 982, 5268, 2435, -1000, 1440, -1000, 1188, 205, 1113, -1000,
- -1000, 814, 814, -1000, 915, 910, -1000, 47207, 1885, -1000,
- 514, 1414, 514, 1107, -1000, 1106, -1000, -1000, -1000, -1000,
- 1882, 2025, -1000, -1000, -1000, -1000, 47207, -1000, -1000, 47207,
- 47207, 47207, 1873, 2354, -1000, 19384, 1872, 816, 2091, 45913,
- 45913, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, 528, 814, -400, 373, 372, 814, 814, 814,
- -446, -1000, -1000, 1567, 1563, -1000, -116, -1000, 19384, -1000,
- -1000, -1000, 1110, 1110, 1408, 1397, 1380, -1000, 1853, -1000,
- -1000, -1000, 1696, -1000, -1000, -96, 45913, 45913, 45913, 45913,
- -1000, -1000, 1040, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 749, 1334, 345, -98, 1334,
- -1000, -1000, 514, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 19384, -1000, 19384, -1000, 1467, 19384, 2326,
- 1373, 19384, 19384, -1000, 1094, 1084, 2036, -1000, -1000, -1000,
- 19384, -1000, -1000, -1000, -1000, -1000, 19384, -1000, -1000, -1000,
- 19384, 231, 884, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 1334, 415, -1000, -1000, -1000, -1000, 2403,
- -1000, 1334, 19384, -1000, -1000, 19384, -1000, 19384, 19384, -1000,
- 19384, -1000, 19384, -1000, -1000, -1000, -1000, 19384, 1839, 2167,
- 1839, 1839, 25856, -1000, -1000, 2394, 2392, 2348, 2146, 2151,
- 2151, 2149, -1000, 2347, 2346, -1000, 1358, 2345, 1356, 909,
- -1000, 46560, 19384, 159, -1000, 393, 45913, 159, 45913, -1000,
- 2389, -1000, -1000, 19384, 1867, -1000, 19384, -1000, -1000, -1000,
- -1000, 6253, 2399, 1734, -1000, -1000, 764, -1000, 19384, -1000,
- -1000, -1000, 108, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 1354, 1341, -1000, -1000, 1858, 19384, -1000, -1000, -1000,
- 1626, 1577, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- 1853, -1000, -1000, -1000, -1000, 408, -441, 2083, 45913, 1073,
- -1000, 1549, 1767, 404, 159, 1339, 814, 814, 814, 1072,
- 1035, 31032, 1547, -1000, 45913, 506, -1000, 408, -1000, -123,
- -125, 2036, -1000, -1000, 2283, -1000, -1000, 13542, -1000, -1000,
- 1850, 1984, -1000, -1000, -1000, -1000, 2076, -97, -103, -1000,
- -1000, 2036, 2036, 1990, 1334, -1000, 2036, 2036, 1564, 1407,
- -1000, 2036, 1404, 1791, -1000, 231, 1334, 2024, -1000, -1000,
- 6253, -1000, -1000, 2389, 2344, 106, -1000, -1000, 246, 106,
- 1467, 1762, 2036, 1728, 1701, 2036, 2036, 26503, -1000, 2330,
- 2319, 31679, 31679, 797, 2392, -157, 19384, 19384, 2141, 1075,
- -1000, -1000, -1000, -1000, 1336, 1331, -1000, 1325, -1000, 2430,
- -1000, 1467, -1000, 159, -1000, 665, 1753, -1000, 2326, 1467,
- 45913, 1467, 96, 2389, -1000, 2036, -1000, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839, 1839,
- 1839, 1839, 1839, 1839, 1839, 1839, 1839, -1000, -1000, 45913,
- 2071, -1000, -1000, 2280, 1528, 61, -1000, 1391, 1767, -1000,
- -1000, 156, -1000, 19384, -1000, 31032, 1304, 1270, -1000, -1000,
- -1000, -1000, -446, -1000, -1000, -1000, -1000, -1000, -1000, 394,
- 1761, -1000, 813, 45913, 47207, -1000, 2028, -1000, -1000, -1000,
- 19384, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 19384,
- -1000, 1334, 2013, -1000, -267, -1000, -419, 19384, -150, -1000,
- -1000, -150, -1000, 19384, -1000, -1000, 19384, -1000, 19384, -1000,
- -1000, 1525, -1000, -1000, -1000, -1000, -1000, 1525, 1525, -1000,
- -157, -1000, 1757, -1000, 45913, 1467, 1744, -1000, 1036, -1000,
- -1000, -1000, -1000, -1000, 46560, 1753, 45913, -1000, 1431, 1334,
- 1839, 2326, -1000, 1429, -1000, 394, -1000, 1847, 1887, -1000,
- -1000, -1000, 17443, -1000, -1000, -1000, -1000, -1000, 268, -93,
- 13542, 10277, 1413, -1000, -90, 2036, 1404, -1000, -365, -1000,
- -1000, -1000, -1000, 170, -1000, -1000, 1744, -1000, -1000, 1642,
- 1623, 1530, 30385, -1000, -1000, -1000, -1000, -157, -1000, -1000,
- 2279, -1000, -1000, 1743, -1000, -1000, 25856, 45266, -1000, -78,
- 619, -93, 19384, 1842, 1334, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, 29, -1000, -1000, -1000, -1000, -1000, 1977,
- -100, -1000, -1000, -1000, 173, -390, -195, -196, -1000, -1000,
- 20031, -1000, 19384, -1000, 19384, -1000, 19384, -1000, -1000, -1000,
- 45913, 1839, -1000, 1396, -1000, 3836, -214, 2008, -1000, 43,
- -1000, -1000, -1000, 973, 1261, -1000, -1000, -1000, -1000, -1000,
- -1000, 2052, 45913, -1000, 534, -1000, -1000, -96, -108, 899,
- -1000, -1000, -1000, -1000, -1000, 1256, 1222, 2036, -1000, 45913,
- -1000, 45266, -209, 748, 6253, -1000, 2007, 1981, 2408, -1000,
- -1000, -1000, -1000, -1000, -1000, -454, 1390, 266, -1000, -1000,
- 173, -1000, 19384, -1000, 19384, -1000, 1334, -1000, -1000, 2273,
- 96, -1000, 2425, -1000, 2400, 733, 733, -1000, 1015, -454,
- -1000, -1000, 2036, 2036, -1000, -223, -1000, -1000, -1000, -1000,
- -1000, 520, 1086, -1000, -1000, -1000, -1000, -1000, 6253, -1000,
- -1000, -1000, 215, 215, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1644,
+ 1274, 1274, 1274, 1274, 1274, 18713, 1778, -1000, -1000, -1000,
+ 1240, 2980, 1319, 2972, 1274, 1274, -1000, 1274, 2959, 2859,
+ 1195, 1598, 1195, 1627, -1000, 2841, 1274, 2807, 2802, 2791,
+ 2195, 2778, 2774, 2767, 1274, 1274, 1274, 2183, 2747, 2739,
+ 2717, 2711, 2701, 2695, 2689, 2680, 2628, 1274, -165, 1274,
+ 1195, -1000, -1000, -1000, -1000, -1000, 2613, 2161, 1195, 1626,
+ 1672, 621, -1000, -1000, 1495, 1195, 1195, 1495, 1495, 2603,
+ 2599, 2594, 2585, 2446, 2413, 1274, 1274, -1000, 1274, 2371,
+ 2365, 2157, 2152, 1195, -1000, 1242, 46536, -1000, -298, -1000,
+ 45, 661, 1672, -1000, 31008, 1195, -1000, 4155, -1000, 1045,
+ -1000, -1000, -1000, -1000, -1000, 28420, 1486, 2170, -1000, -1000,
+ 1672, 1479, -1000, -1000, 368, 115, 27773, 681, 681, 148,
+ 1041, 1041, 18713, -1000, -1000, -1000, -1000, -1000, -1000, 613,
+ 2325, 373, 1672, -1000, 1620, 2885, -1000, -1000, -1000, 2199,
+ 21949, -1000, -1000, 1672, 1672, 46536, 1554, 1546, -1000, 612,
+ -1000, 1150, 1624, 40, 39, -1000, -1000, -1000, -1000, 1041,
+ -1000, 1142, 358, 680, -1000, 466, -1000, -1000, -1000, -1000,
+ 2084, 117, -1000, -1000, -1000, 304, 368, -1000, -1000, -1000,
+ -1000, -1000, -1000, 1386, 1386, -1000, -1000, -1000, -1000, -1000,
+ 1055, -1000, -1000, -1000, 1050, -1000, -1000, 2293, 1868, 301,
+ -1000, -1000, 724, 1382, -1000, -1000, 2101, 724, 724, 45242,
+ -1000, -1000, 1593, 2173, 262, 46536, 785, 1877, -1000, 1796,
+ 1796, 1796, 46536, -1000, -1000, -1000, -1000, -1000, -1000, -433,
+ 66, 410, -1000, -1000, -1000, 4372, 45242, 1475, -1000, 244,
+ -1000, 1589, -1000, 45242, -1000, 1455, 1758, 1129, 1129, -1000,
+ -1000, -1000, 45242, 1672, -1000, -1000, -1000, -1000, 566, 2143,
+ 337, -1000, -1000, -182, -1000, -1000, 248, 244, 45889, 1129,
+ 662, -1000, -1000, -1000, -1000, -1000, -434, 1450, 532, 252,
+ 352, 46536, 46536, 46536, 46536, 46536, 583, -1000, -1000, 75,
+ -1000, -1000, 225, -1000, -1000, -1000, -1000, 225, -1000, -1000,
+ -1000, -1000, 302, 502, -1000, 46536, 46536, 617, -1000, -1000,
+ -1000, 859, -1000, -1000, 859, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 2129, 46536, 64, -386, -1000,
+ -383, 18713, -1000, -1000, -1000, -1000, 1235, 527, 1449, 19360,
+ 19360, 19360, -1000, -1000, -1000, 887, 887, 27126, -1000, 18713,
+ 18066, -1000, -1000, 18713, 18713, 732, -1000, 18713, 973, -1000,
+ 18713, -1000, -1000, -1000, 1242, 1274, 1274, 1274, 1274, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1614,
+ 18713, 18713, 18713, 1195, 342, -1000, -1000, -1000, -1000, -1000,
+ 2340, -1000, 18713, -1000, 27126, 18713, 18713, 18713, -1000, -1000,
+ -1000, 18713, 18713, -1000, -1000, 18713, 18713, -1000, 18713, 18713,
+ 18713, -1000, 18713, 18713, 18713, 18713, -1000, -1000, -1000, -1000,
+ 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713, 18713,
+ -1000, -1000, 32302, 110, -165, 1014, 110, 1014, -1000, 18066,
+ 13524, -1000, -1000, -1000, -1000, -1000, 18713, 18713, 18713, 18713,
+ 18713, 18713, -1000, -1000, -1000, 18713, 18713, -1000, 18713, -1000,
+ 18713, -1000, -1000, -1000, -1000, -1000, 661, -1000, 642, 642,
+ 642, 45242, -1000, -1000, -1000, -1000, 1615, -1000, 2236, -1000,
+ 1975, 1973, 2332, 2325, -1000, 23891, 2170, -1000, -1000, 45242,
+ -292, -1000, 2019, 1992, 681, 681, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 10912, 2240, 18713, 1876, 45889, 140, -1000,
+ 23244, 45242, 45889, 23891, 23891, 23891, 23891, 23891, -1000, 1907,
+ 1900, -1000, 1952, 1948, 1958, 46536, -1000, 1415, 1434, -1000,
+ 18713, 25832, 1583, 23891, -1000, -1000, 23891, 46536, 10259, -1000,
+ -1000, 58, 52, -1000, -1000, -1000, -1000, 280, -1000, -1000,
+ 919, 2187, 2044, -1000, -1000, -1000, -1000, -1000, 1420, -1000,
+ 1406, 1613, 1385, 229, -1000, 1743, 2126, 724, 724, -1000,
+ 1042, -1000, 1129, 1375, 1366, -1000, -1000, -1000, 531, -1000,
+ 2180, 46536, 1875, 1867, 1865, -1000, -442, 1029, 1755, 1707,
+ 18713, 1752, 2308, 1606, 45242, -1000, -1000, 45889, -1000, 329,
+ -1000, 301, 45242, -1000, -1000, -1000, 355, 46536, -1000, 6547,
+ -1000, -1000, -1000, 244, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 46536, 273, -1000, 1750, 1121, -1000, -1000, 1787, -1000,
+ -1000, -1000, -1000, 223, 318, 1357, 230, 1353, 230, -1000,
+ 46536, 616, 1868, 46536, -1000, -1000, -1000, 876, 876, -1000,
+ -1000, 2121, -1000, 1129, 1274, 19360, 19360, -1000, 679, 255,
+ -134, 1739, 1739, -1000, 1739, 1740, -1000, 1739, 199, 1739,
+ 198, 1739, -1000, -1000, 1195, 1195, 1242, -1000, 2138, 1372,
+ -1000, 1041, 18713, 2358, -1000, -1000, -1000, -1000, -1000, -8,
+ 2322, 2306, 1274, -1000, 1732, 1731, 18713, 1274, 1195, 1912,
+ 1274, 1274, 1274, 1274, -1000, 1041, 1242, 2272, 1242, 1274,
+ 1274, 2251, 333, 1274, 1381, 1381, 1381, 1381, 1381, 1242,
+ 1242, 1242, 1242, 45242, -1000, -165, -1000, -1000, -203, -208,
+ -1000, 1195, -165, 1609, 1195, -1000, 1906, 1851, 2239, 1824,
+ 1274, 2235, 1274, 1274, 1274, 1820, -1000, 2231, 2231, 2231,
+ 1363, 1045, 46536, -1000, -1000, -1000, -1000, 2325, 2318, 1607,
+ -1000, -1000, 115, 453, -1000, 1987, 1992, -1000, 2301, 2002,
+ 2297, -1000, -1000, -1000, -1000, -1000, 1041, -1000, 2155, 1592,
+ -1000, 740, 1603, -1000, -1000, 17419, 1374, 1954, 603, 1363,
+ 1625, 2885, 1871, 1860, 1940, -1000, -1000, -1000, -1000, 1861,
+ -1000, 1830, -1000, -1000, 1700, -1000, 2225, 357, 23891, 1595,
+ 1595, -1000, 602, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 915, 6195, 2366, -1000, 1351, -1000, 1140, 193, 1013, -1000,
+ -1000, 724, 724, -1000, 838, 828, -1000, 46536, 1716, -1000,
+ 368, 1350, 368, 1009, -1000, 1008, -1000, -1000, -1000, -1000,
+ 1738, 1819, -1000, -1000, -1000, -1000, 46536, -1000, -1000, 46536,
+ 46536, 46536, 1714, 2294, -1000, 18713, 1711, 726, 2177, 45242,
+ 45242, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 463, 724, -414, 319, 314, 724, 724, 724,
+ -443, -1000, -1000, 1349, 1344, -1000, -122, -1000, 18713, -1000,
+ -1000, -1000, 1061, 1061, 1339, 1337, 1324, -1000, 1700, -1000,
+ -1000, -1000, 1540, -1000, -1000, -102, 45242, 45242, 45242, 45242,
+ -1000, -1000, 993, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 679, 1195, 341, -109, 1195,
+ -1000, -1000, 368, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 18713, -1000, 18713, -1000, 1041, 18713, 2240,
+ 1304, 18713, 18713, -1000, 1005, 962, 1274, -1000, -1000, -1000,
+ 18713, -1000, -1000, -1000, -1000, -1000, 18713, -1000, -1000, -1000,
+ 18713, 221, 887, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 1195, 356, -1000, -1000, -1000, -1000, 2328,
+ -1000, 1195, 18713, -1000, -1000, 18713, -1000, 18713, 18713, -1000,
+ 18713, -1000, 18713, -1000, -1000, -1000, -1000, 18713, 1672, 2059,
+ 1672, 1672, 25832, -1000, -1000, 2318, 2274, 2291, 1990, 1993,
+ 1993, 1987, -1000, 2281, 2275, -1000, 1302, 2262, 1292, 823,
+ -1000, 45889, 18713, 140, -1000, 389, 45242, 140, 45242, -1000,
+ 2255, -1000, -1000, 18713, 1710, -1000, 18713, -1000, -1000, -1000,
+ -1000, 7353, 2325, 1595, -1000, -1000, 693, -1000, 18713, -1000,
+ -1000, -1000, 3726, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 1284, 1232, -1000, -1000, 1709, 18713, -1000, -1000, -1000,
+ 1518, 1503, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 1700, -1000, -1000, -1000, -1000, 355, -438, 1498, 45242, 958,
+ -1000, 1333, 1606, 334, 140, 1229, 724, 724, 724, 957,
+ 952, 31008, 1330, -1000, 45242, 455, -1000, 355, -1000, -129,
+ -131, 1274, -1000, -1000, 2185, -1000, -1000, 13524, -1000, -1000,
+ 1697, 1794, -1000, -1000, -1000, -1000, 1921, -92, -114, -1000,
+ -1000, 1274, 1274, 2217, 1195, -1000, 1274, 1274, 1487, 1410,
+ -1000, 1274, 1242, 1791, -1000, 221, 1195, 1857, -1000, -1000,
+ 7353, -1000, -1000, 2255, 2249, 110, -1000, -1000, 260, 110,
+ 1041, 1764, 1274, 1702, 1692, 1274, 1274, 26479, -1000, 2248,
+ 2247, 31655, 31655, 661, 2274, -172, 18713, 18713, 1984, 996,
+ -1000, -1000, -1000, -1000, 1215, 1170, -1000, 1167, -1000, 2360,
+ -1000, 1041, -1000, 140, -1000, 600, 1603, -1000, 2240, 1041,
+ 45242, 1041, 113, 2255, -1000, 1274, -1000, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672, 1672,
+ 1672, 1672, 1672, 1672, 1672, 1672, 1672, -1000, -1000, 45242,
+ 1440, -1000, -1000, 2184, 1323, 63, -1000, 1320, 1606, -1000,
+ -1000, 132, -1000, 18713, -1000, 31008, 1159, 1157, -1000, -1000,
+ -1000, -1000, -443, -1000, -1000, -1000, -1000, -1000, -1000, 445,
+ 1605, -1000, 719, 45242, 46536, -1000, 1914, -1000, -1000, -1000,
+ 18713, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 18713,
+ -1000, 1195, 1852, -1000, -267, -1000, -417, 18713, -165, -1000,
+ -1000, -165, -1000, 18713, -1000, -1000, 18713, -1000, 18713, -1000,
+ -1000, 1290, -1000, -1000, -1000, -1000, -1000, 1290, 1290, -1000,
+ -172, -1000, 1604, -1000, 45242, 1041, 1598, -1000, 939, -1000,
+ -1000, -1000, -1000, -1000, 45889, 1603, 45242, -1000, 1272, 1195,
+ 1672, 2240, -1000, 1268, -1000, 445, -1000, 1686, 1707, -1000,
+ -1000, -1000, 16772, -1000, -1000, -1000, -1000, -1000, 217, -96,
+ 13524, 9606, 1266, -1000, -94, 1274, 1242, -1000, -375, -1000,
+ -1000, -1000, -1000, 150, -1000, -1000, 1598, -1000, -1000, 1517,
+ 1489, 1431, 30361, -1000, -1000, -1000, -1000, -172, -1000, -1000,
+ 2178, -1000, -1000, 1338, -1000, -1000, 25832, 44595, -1000, -63,
+ 626, -96, 18713, 1673, 1195, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 24, -1000, -1000, 591, -1000, -1000, -1000,
+ 1787, -110, -1000, -1000, -1000, 135, -395, -197, -220, -1000,
+ -1000, 19360, -1000, 18713, -1000, 18713, -1000, 18713, -1000, -1000,
+ -1000, 45242, 1672, -1000, 1194, -1000, 4232, -245, 1849, -1000,
+ -60, -1000, -1000, -1000, 904, 1109, -1000, -1000, -1000, -1000,
+ -1000, -1000, 1336, 45242, -1000, 468, -1000, -1000, 12871, -102,
+ -115, 777, -1000, -1000, -1000, -1000, -1000, 1066, 988, 1274,
+ -1000, 45242, -1000, 44595, -219, 662, 7353, -1000, 1841, 1838,
+ 2335, -1000, -1000, -1000, -1000, -1000, -1000, -450, 1191, 276,
+ -1000, -1000, -1000, 135, -1000, 18713, -1000, 18713, -1000, 1195,
+ -1000, -1000, 2164, 113, -1000, 2355, -1000, 2336, 678, 678,
+ -1000, 940, -450, -1000, -1000, 1274, 1274, -1000, -246, -1000,
+ -1000, -1000, -1000, -1000, 458, 1025, -1000, -1000, -1000, -1000,
+ -1000, 7353, -1000, -1000, -1000, 207, 207, -1000, -1000,
}
var yyPgo = [...]int{
- 0, 3025, 3022, 38, 2, 36, 35, 3019, 93, 102,
- 196, 37, 211, 104, 3013, 3007, 3006, 3001, 2997, 2995,
- 2994, 171, 169, 166, 2993, 2992, 2990, 2989, 2988, 2987,
- 2986, 2985, 2984, 2983, 162, 159, 181, 2981, 2980, 2977,
- 117, 179, 89, 91, 183, 2975, 2974, 84, 2972, 2971,
- 2970, 192, 191, 190, 918, 2968, 188, 115, 53, 2967,
- 2966, 2965, 2964, 2963, 2961, 2959, 2958, 2956, 2953, 2951,
- 2950, 2947, 2942, 2940, 2935, 2932, 287, 2930, 2926, 22,
- 2921, 86, 2919, 2916, 2913, 2910, 12, 2908, 2907, 16,
- 40, 2904, 2892, 48, 2888, 2886, 2884, 2883, 2878, 17,
- 2877, 27, 2876, 39, 2875, 2874, 126, 2873, 2868, 2866,
- 41, 2865, 2863, 2862, 2859, 2856, 2855, 2854, 140, 2853,
- 2849, 2847, 236, 185, 2845, 2844, 273, 143, 108, 2843,
- 2842, 114, 180, 2830, 116, 2827, 2824, 2819, 146, 2812,
- 138, 2804, 2803, 66, 71, 2802, 371, 2800, 2799, 11,
- 15, 72, 10, 20, 23, 2795, 2794, 65, 78, 2793,
- 129, 2790, 2787, 98, 69, 2786, 106, 103, 2784, 2782,
- 7, 5, 2781, 3, 1, 4, 68, 2779, 2777, 122,
- 2776, 2775, 2773, 92, 2771, 2764, 2164, 2761, 95, 131,
- 101, 81, 2759, 50, 58, 2757, 2754, 2753, 2746, 2745,
- 54, 2739, 2738, 2737, 136, 380, 160, 2735, 47, 82,
- 46, 134, 2734, 61, 80, 189, 161, 2733, 2731, 137,
- 135, 2726, 2725, 62, 44, 45, 2722, 113, 130, 118,
- 111, 112, 163, 2720, 2716, 60, 75, 2715, 2714, 2708,
- 2705, 164, 2704, 2703, 73, 2702, 57, 2700, 167, 2699,
- 19, 67, 2698, 49, 149, 2697, 76, 2696, 2695, 63,
- 100, 70, 43, 2691, 195, 2690, 56, 170, 128, 151,
- 2687, 2685, 2684, 2681, 187, 338, 2679, 2676, 77, 176,
- 139, 144, 94, 2674, 349, 2668, 2653, 132, 2603, 5785,
- 2651, 42, 154, 2632, 2626, 7129, 157, 52, 26, 2625,
- 109, 2617, 2616, 2615, 2612, 193, 172, 105, 168, 59,
- 2611, 2606, 2604, 14, 2598, 2593, 2590, 2580, 2572, 2570,
- 90, 34, 33, 32, 202, 74, 30, 97, 150, 83,
- 2569, 2568, 2566, 124, 79, 2565, 158, 156, 125, 155,
- 2563, 177, 141, 123, 2562, 119, 31, 2559, 2552, 2551,
- 2549, 99, 2548, 2534, 2525, 2522, 148, 142, 121, 87,
- 2518, 88, 120, 147, 145, 55, 2516, 51, 2504, 2502,
- 29, 182, 28, 2500, 13, 107, 212, 2497, 5000, 178,
- 2496, 21, 367, 174, 2495, 2493, 8, 9, 6, 2491,
- 2490, 2486, 2477, 133, 2470, 2469, 2467, 2463, 25, 64,
- 24, 18, 110, 85, 2459, 2458, 3799, 0, 127, 2423,
- 197,
+ 0, 2968, 2966, 40, 6, 42, 39, 2962, 83, 107,
+ 201, 60, 196, 105, 2950, 2945, 2942, 2940, 2939, 2937,
+ 2936, 182, 181, 172, 2935, 2934, 2931, 2928, 2927, 2925,
+ 2923, 2921, 2918, 2911, 177, 162, 197, 2910, 2907, 2901,
+ 122, 185, 90, 96, 193, 2900, 2894, 81, 2893, 2885,
+ 2884, 190, 188, 186, 837, 2883, 174, 121, 53, 2882,
+ 2875, 2873, 2872, 2865, 2864, 2861, 2854, 2846, 2838, 2829,
+ 2826, 2825, 2824, 2823, 2820, 2818, 290, 2807, 2804, 25,
+ 2803, 84, 2802, 2800, 2798, 2797, 12, 2793, 2790, 16,
+ 43, 2788, 2787, 51, 2786, 2784, 2781, 2780, 2779, 22,
+ 2775, 30, 2774, 41, 2766, 2765, 131, 2764, 2762, 2761,
+ 45, 2759, 2758, 2757, 2756, 2753, 2750, 2749, 144, 2746,
+ 2737, 2734, 166, 210, 2732, 2730, 167, 108, 116, 2728,
+ 2725, 117, 194, 2723, 125, 2722, 2720, 2719, 154, 2718,
+ 686, 2717, 2714, 75, 71, 2713, 28, 2712, 2711, 11,
+ 85, 72, 9, 8, 4, 2709, 2708, 66, 80, 2704,
+ 114, 2703, 2702, 106, 87, 2700, 109, 115, 2698, 2697,
+ 17, 7, 2695, 2, 1, 5, 70, 2694, 2692, 126,
+ 2685, 2679, 2678, 102, 2677, 2676, 665, 2674, 99, 139,
+ 111, 78, 2672, 54, 61, 2659, 2654, 2653, 2645, 2633,
+ 57, 2628, 2626, 2625, 145, 320, 164, 2611, 56, 74,
+ 59, 133, 2610, 55, 82, 198, 168, 2608, 2606, 137,
+ 142, 2605, 2594, 65, 47, 44, 2593, 101, 134, 124,
+ 46, 100, 135, 2592, 2591, 67, 77, 2586, 2580, 2579,
+ 2578, 170, 2576, 2574, 76, 2572, 58, 2571, 191, 2566,
+ 19, 69, 2565, 52, 171, 2564, 104, 2563, 2562, 68,
+ 103, 73, 33, 2561, 176, 2545, 63, 161, 129, 159,
+ 2539, 2533, 2532, 2531, 195, 357, 2530, 2529, 79, 173,
+ 153, 149, 97, 2528, 347, 2527, 2523, 20, 2256, 6149,
+ 2522, 38, 160, 2516, 2515, 5912, 24, 50, 26, 2511,
+ 119, 2508, 2505, 2502, 2496, 228, 178, 118, 158, 62,
+ 2494, 2492, 2491, 21, 2490, 2487, 2486, 2485, 2484, 2482,
+ 98, 37, 36, 35, 199, 88, 14, 110, 155, 89,
+ 2481, 2480, 2477, 128, 94, 2476, 157, 156, 130, 169,
+ 2475, 179, 146, 138, 2474, 95, 34, 2473, 2469, 2467,
+ 2460, 136, 2452, 2443, 2438, 2437, 151, 147, 127, 92,
+ 2436, 93, 123, 150, 148, 48, 2430, 49, 2428, 2426,
+ 32, 192, 31, 2424, 18, 113, 112, 2423, 3848, 183,
+ 2422, 23, 321, 152, 2421, 2420, 10, 13, 15, 2419,
+ 2416, 2412, 2407, 143, 2404, 2403, 2402, 2397, 29, 64,
+ 27, 3, 120, 91, 2395, 2391, 5054, 0, 132, 2388,
+ 204,
}
-//line sql.y:7884
+//line sql.y:7871
type yySymType struct {
union any
empty struct{}
@@ -7433,68 +7354,68 @@ var yyR1 = [...]int{
351, 351, 351, 351, 351, 351, 351, 351, 351, 351,
351, 351, 351, 351, 351, 351, 351, 351, 351, 277,
277, 277, 385, 385, 385, 385, 385, 385, 384, 384,
- 384, 350, 350, 350, 383, 383, 57, 57, 214, 214,
- 402, 402, 403, 403, 403, 45, 45, 45, 45, 45,
- 45, 44, 44, 44, 40, 40, 40, 40, 40, 40,
+ 384, 350, 350, 350, 350, 383, 383, 57, 57, 214,
+ 214, 402, 402, 403, 403, 403, 45, 45, 45, 45,
+ 45, 45, 44, 44, 44, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 40, 40, 40, 40, 46, 46, 41, 41, 41,
- 41, 41, 41, 41, 41, 41, 41, 27, 27, 27,
+ 40, 40, 40, 40, 40, 40, 46, 46, 41, 41,
+ 41, 41, 41, 41, 41, 41, 41, 41, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
- 27, 27, 27, 27, 27, 106, 106, 107, 107, 107,
- 107, 109, 109, 109, 366, 366, 58, 58, 3, 3,
- 169, 171, 172, 172, 170, 170, 170, 170, 170, 170,
- 60, 60, 59, 59, 174, 173, 175, 175, 175, 1,
- 1, 2, 2, 4, 4, 371, 371, 371, 371, 371,
+ 27, 27, 27, 27, 27, 27, 106, 106, 107, 107,
+ 107, 107, 109, 109, 109, 366, 366, 58, 58, 3,
+ 3, 169, 171, 172, 172, 170, 170, 170, 170, 170,
+ 170, 60, 60, 59, 59, 174, 173, 175, 175, 175,
+ 1, 1, 2, 2, 4, 4, 371, 371, 371, 371,
371, 371, 371, 371, 371, 371, 371, 371, 371, 371,
- 371, 371, 371, 371, 371, 371, 371, 332, 332, 332,
- 365, 365, 367, 108, 108, 108, 108, 108, 108, 108,
- 108, 108, 108, 112, 111, 111, 110, 113, 113, 113,
- 113, 113, 113, 113, 113, 369, 369, 369, 61, 61,
- 370, 321, 322, 323, 5, 6, 346, 368, 120, 120,
- 28, 37, 37, 29, 29, 29, 29, 30, 30, 62,
+ 371, 371, 371, 371, 371, 371, 371, 371, 332, 332,
+ 332, 365, 365, 367, 108, 108, 108, 108, 108, 108,
+ 108, 108, 108, 108, 112, 111, 111, 110, 113, 113,
+ 113, 113, 113, 113, 113, 113, 369, 369, 369, 61,
+ 61, 370, 321, 322, 323, 5, 6, 346, 368, 120,
+ 120, 28, 37, 37, 29, 29, 29, 29, 30, 30,
+ 62, 63, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
- 63, 276, 276, 285, 285, 275, 275, 300, 300, 300,
- 278, 278, 278, 279, 279, 395, 395, 395, 272, 272,
- 64, 64, 64, 301, 301, 301, 301, 66, 66, 67,
- 68, 68, 303, 303, 304, 304, 69, 70, 82, 82,
- 82, 82, 82, 82, 82, 105, 105, 105, 15, 15,
- 15, 15, 78, 78, 78, 14, 14, 65, 65, 72,
- 392, 392, 393, 394, 394, 394, 394, 73, 75, 31,
- 31, 31, 31, 31, 31, 130, 130, 118, 118, 118,
- 118, 118, 118, 118, 118, 118, 118, 118, 118, 125,
- 125, 125, 119, 119, 409, 76, 77, 77, 123, 123,
- 123, 116, 116, 116, 122, 122, 122, 16, 16, 17,
- 258, 258, 18, 18, 127, 127, 129, 129, 129, 129,
- 129, 131, 131, 131, 131, 131, 131, 131, 126, 126,
- 128, 128, 128, 128, 293, 293, 293, 292, 292, 163,
- 163, 165, 164, 164, 166, 166, 167, 167, 167, 167,
- 212, 212, 189, 189, 251, 251, 252, 252, 250, 250,
- 257, 257, 253, 253, 253, 253, 260, 260, 168, 168,
- 168, 168, 176, 176, 177, 177, 178, 178, 302, 302,
- 298, 298, 298, 297, 297, 182, 182, 182, 184, 183,
- 183, 183, 183, 185, 185, 187, 187, 186, 186, 188,
- 193, 193, 192, 192, 190, 190, 190, 190, 191, 191,
- 191, 191, 194, 194, 140, 140, 140, 140, 140, 140,
- 140, 155, 155, 155, 155, 158, 158, 158, 158, 158,
- 158, 158, 158, 158, 158, 158, 241, 241, 146, 146,
+ 63, 63, 276, 276, 285, 285, 275, 275, 300, 300,
+ 300, 278, 278, 278, 279, 279, 395, 395, 395, 272,
+ 272, 64, 64, 64, 301, 301, 301, 301, 66, 66,
+ 67, 68, 68, 303, 303, 304, 304, 69, 70, 82,
+ 82, 82, 82, 82, 82, 82, 105, 105, 105, 15,
+ 15, 15, 15, 78, 78, 78, 14, 14, 65, 65,
+ 72, 392, 392, 393, 394, 394, 394, 394, 73, 75,
+ 31, 31, 31, 31, 31, 31, 130, 130, 118, 118,
+ 118, 118, 118, 118, 118, 118, 118, 118, 118, 118,
+ 125, 125, 125, 119, 119, 409, 76, 77, 77, 123,
+ 123, 123, 116, 116, 116, 122, 122, 122, 16, 16,
+ 17, 258, 258, 18, 18, 127, 127, 129, 129, 129,
+ 129, 129, 131, 131, 131, 131, 131, 131, 131, 126,
+ 126, 128, 128, 128, 128, 293, 293, 293, 292, 292,
+ 163, 163, 165, 164, 164, 166, 166, 167, 167, 167,
+ 167, 212, 212, 189, 189, 251, 251, 252, 252, 250,
+ 250, 257, 257, 253, 253, 253, 253, 260, 260, 168,
+ 168, 168, 168, 176, 176, 177, 177, 178, 178, 302,
+ 302, 298, 298, 298, 297, 297, 182, 182, 182, 184,
+ 183, 183, 183, 183, 185, 185, 187, 187, 186, 186,
+ 188, 193, 193, 192, 192, 190, 190, 190, 190, 191,
+ 191, 191, 191, 194, 194, 140, 140, 140, 140, 140,
+ 140, 140, 155, 155, 155, 155, 158, 158, 158, 158,
+ 158, 158, 158, 158, 158, 158, 158, 241, 241, 146,
146, 146, 146, 146, 146, 146, 146, 146, 146, 146,
- 146, 150, 150, 150, 150, 150, 150, 150, 150, 150,
+ 146, 146, 150, 150, 150, 150, 150, 150, 150, 150,
150, 150, 150, 150, 150, 150, 150, 150, 150, 150,
- 150, 150, 150, 150, 150, 150, 149, 217, 217, 216,
- 216, 83, 83, 83, 84, 84, 85, 85, 85, 85,
- 85, 86, 86, 86, 86, 86, 141, 141, 88, 88,
- 87, 87, 207, 207, 290, 290, 89, 90, 90, 93,
- 93, 92, 91, 91, 97, 97, 94, 94, 96, 96,
- 95, 98, 98, 99, 100, 100, 273, 273, 195, 195,
- 203, 203, 203, 203, 196, 196, 196, 196, 196, 196,
- 196, 204, 204, 204, 211, 205, 205, 201, 201, 199,
- 199, 199, 199, 199, 199, 199, 199, 199, 199, 200,
+ 150, 150, 150, 150, 150, 150, 150, 149, 217, 217,
+ 216, 216, 83, 83, 83, 84, 84, 85, 85, 85,
+ 85, 85, 86, 86, 86, 86, 86, 141, 141, 88,
+ 88, 87, 87, 207, 207, 290, 290, 89, 90, 90,
+ 93, 93, 92, 91, 91, 97, 97, 94, 94, 96,
+ 96, 95, 98, 98, 99, 100, 100, 273, 273, 195,
+ 195, 203, 203, 203, 203, 196, 196, 196, 196, 196,
+ 196, 196, 204, 204, 204, 211, 205, 205, 201, 201,
+ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
@@ -7503,34 +7424,34 @@ var yyR1 = [...]int{
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
- 200, 200, 200, 200, 200, 160, 160, 160, 160, 222,
- 222, 147, 147, 147, 147, 147, 147, 147, 147, 147,
- 147, 147, 147, 147, 147, 147, 148, 148, 161, 161,
- 161, 161, 162, 162, 162, 162, 162, 162, 162, 310,
- 310, 115, 115, 115, 115, 115, 115, 115, 115, 115,
- 115, 115, 115, 114, 114, 114, 114, 114, 114, 114,
- 114, 114, 410, 410, 324, 324, 324, 324, 202, 202,
- 202, 202, 202, 121, 121, 121, 121, 121, 307, 307,
- 307, 311, 311, 311, 309, 309, 309, 309, 309, 309,
- 309, 309, 309, 309, 309, 309, 309, 309, 309, 312,
- 312, 220, 220, 117, 117, 218, 218, 219, 221, 221,
- 213, 213, 213, 213, 215, 215, 198, 198, 198, 223,
- 223, 224, 224, 101, 102, 102, 103, 103, 225, 225,
- 227, 226, 226, 228, 229, 229, 229, 230, 230, 231,
- 231, 231, 47, 47, 47, 47, 47, 42, 42, 42,
- 42, 43, 43, 43, 43, 132, 132, 132, 132, 134,
- 134, 133, 133, 79, 79, 80, 80, 80, 138, 138,
- 139, 139, 139, 136, 136, 137, 137, 248, 248, 232,
- 232, 232, 239, 239, 239, 235, 235, 237, 237, 237,
- 238, 238, 238, 236, 245, 245, 247, 247, 246, 246,
- 242, 242, 243, 243, 244, 244, 244, 240, 240, 197,
- 197, 197, 197, 197, 249, 249, 249, 249, 261, 261,
- 208, 208, 210, 210, 209, 209, 159, 262, 262, 266,
- 263, 263, 267, 267, 267, 267, 255, 255, 255, 264,
- 264, 265, 265, 294, 294, 294, 271, 271, 284, 284,
- 280, 280, 281, 281, 274, 274, 286, 286, 286, 74,
- 206, 206, 362, 362, 359, 289, 289, 291, 291, 295,
- 295, 299, 299, 296, 296, 287, 287, 287, 287, 287,
+ 200, 200, 200, 200, 200, 200, 160, 160, 160, 160,
+ 222, 222, 147, 147, 147, 147, 147, 147, 147, 147,
+ 147, 147, 147, 147, 147, 147, 147, 148, 148, 161,
+ 161, 161, 161, 162, 162, 162, 162, 162, 162, 162,
+ 310, 310, 115, 115, 115, 115, 115, 115, 115, 115,
+ 115, 115, 115, 115, 114, 114, 114, 114, 114, 114,
+ 114, 114, 114, 410, 410, 324, 324, 324, 324, 202,
+ 202, 202, 202, 202, 121, 121, 121, 121, 121, 307,
+ 307, 307, 311, 311, 311, 309, 309, 309, 309, 309,
+ 309, 309, 309, 309, 309, 309, 309, 309, 309, 309,
+ 312, 312, 220, 220, 117, 117, 218, 218, 219, 221,
+ 221, 213, 213, 213, 213, 215, 215, 198, 198, 198,
+ 223, 223, 224, 224, 101, 102, 102, 103, 103, 225,
+ 225, 227, 226, 226, 228, 229, 229, 229, 230, 230,
+ 231, 231, 231, 47, 47, 47, 47, 47, 42, 42,
+ 42, 42, 43, 43, 43, 43, 132, 132, 132, 132,
+ 134, 134, 133, 133, 79, 79, 80, 80, 80, 138,
+ 138, 139, 139, 139, 136, 136, 137, 137, 248, 248,
+ 232, 232, 232, 239, 239, 239, 235, 235, 237, 237,
+ 237, 238, 238, 238, 236, 245, 245, 247, 247, 246,
+ 246, 242, 242, 243, 243, 244, 244, 244, 240, 240,
+ 197, 197, 197, 197, 197, 249, 249, 249, 249, 261,
+ 261, 208, 208, 210, 210, 209, 209, 159, 262, 262,
+ 266, 263, 263, 267, 267, 267, 267, 255, 255, 255,
+ 264, 264, 265, 265, 294, 294, 294, 271, 271, 284,
+ 284, 280, 280, 281, 281, 274, 274, 286, 286, 286,
+ 74, 206, 206, 362, 362, 359, 289, 289, 291, 291,
+ 295, 295, 299, 299, 296, 296, 287, 287, 287, 287,
287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
@@ -7545,7 +7466,7 @@ var yyR1 = [...]int{
287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
287, 287, 287, 287, 287, 287, 287, 287, 287, 287,
- 287, 287, 287, 287, 288, 288, 288, 288, 288, 288,
+ 287, 287, 287, 287, 287, 288, 288, 288, 288, 288,
288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
@@ -7584,8 +7505,8 @@ var yyR1 = [...]int{
288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
288, 288, 288, 288, 288, 288, 288, 288, 288, 288,
- 288, 288, 288, 288, 288, 288, 406, 407, 305, 306,
- 306, 306,
+ 288, 288, 288, 288, 288, 288, 288, 406, 407, 305,
+ 306, 306, 306,
}
var yyR2 = [...]int{
@@ -7638,104 +7559,104 @@ var yyR2 = [...]int{
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 5, 0,
2, 2, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 0, 1, 0, 1, 0, 2,
- 0, 2, 0, 2, 2, 0, 1, 5, 1, 3,
- 7, 1, 3, 3, 1, 2, 2, 2, 5, 5,
- 5, 6, 8, 5, 5, 4, 4, 4, 6, 5,
- 5, 5, 2, 2, 2, 2, 3, 3, 3, 4,
- 3, 3, 1, 3, 5, 1, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 2, 2, 3, 4, 4,
- 2, 11, 3, 6, 8, 6, 6, 6, 13, 8,
- 6, 10, 5, 5, 5, 7, 5, 5, 5, 5,
- 5, 7, 7, 5, 5, 0, 6, 5, 6, 4,
- 5, 0, 8, 9, 0, 3, 0, 1, 0, 3,
- 8, 4, 1, 3, 3, 6, 7, 7, 8, 4,
- 0, 1, 0, 1, 3, 3, 1, 1, 2, 1,
- 1, 0, 2, 0, 2, 5, 3, 7, 4, 4,
- 4, 4, 3, 3, 3, 7, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 2, 0, 2, 2,
- 1, 3, 2, 0, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 3, 1, 3, 3, 0, 2, 2,
- 2, 2, 2, 2, 2, 4, 4, 3, 0, 1,
- 4, 3, 4, 4, 3, 3, 3, 2, 1, 3,
- 3, 3, 5, 7, 7, 6, 5, 3, 2, 3,
- 3, 3, 7, 3, 3, 3, 3, 4, 7, 5,
- 2, 4, 4, 4, 4, 4, 5, 5, 4, 4,
- 4, 4, 4, 4, 4, 4, 2, 2, 4, 4,
- 4, 4, 4, 2, 3, 3, 3, 5, 2, 3,
- 3, 2, 3, 4, 4, 4, 3, 4, 4, 5,
- 3, 0, 1, 0, 1, 1, 1, 0, 2, 2,
- 0, 2, 2, 0, 2, 0, 1, 1, 1, 1,
- 2, 1, 3, 1, 1, 1, 1, 1, 2, 1,
- 1, 5, 0, 1, 0, 1, 2, 3, 0, 3,
- 3, 3, 3, 3, 1, 1, 1, 1, 1, 1,
- 1, 1, 0, 1, 1, 4, 4, 2, 2, 3,
- 1, 3, 2, 1, 2, 1, 2, 2, 4, 3,
- 3, 6, 4, 7, 6, 1, 3, 2, 2, 2,
- 2, 1, 1, 1, 3, 2, 1, 1, 1, 0,
- 1, 1, 0, 3, 0, 2, 0, 2, 1, 2,
- 2, 0, 1, 1, 0, 1, 1, 5, 5, 4,
- 0, 2, 4, 4, 0, 1, 0, 1, 2, 3,
- 4, 1, 1, 1, 1, 1, 1, 1, 1, 3,
- 1, 2, 3, 5, 0, 1, 2, 1, 1, 0,
- 1, 2, 1, 3, 1, 1, 1, 4, 3, 1,
- 1, 2, 3, 7, 0, 3, 0, 1, 1, 3,
- 1, 3, 1, 1, 3, 3, 1, 3, 4, 4,
- 4, 3, 2, 4, 0, 1, 0, 2, 0, 1,
- 0, 1, 2, 1, 1, 1, 2, 2, 1, 2,
- 3, 2, 3, 2, 2, 2, 1, 1, 3, 3,
- 0, 1, 1, 2, 6, 5, 6, 6, 0, 2,
- 3, 3, 0, 2, 3, 3, 3, 2, 3, 1,
- 6, 3, 4, 3, 1, 3, 4, 5, 6, 3,
- 4, 5, 6, 3, 4, 1, 1, 1, 3, 3,
+ 1, 3, 1, 1, 1, 0, 1, 0, 1, 0,
+ 2, 0, 2, 0, 2, 2, 0, 1, 5, 1,
+ 3, 7, 1, 3, 3, 1, 2, 2, 2, 5,
+ 5, 5, 6, 8, 5, 5, 4, 4, 4, 6,
+ 5, 5, 5, 2, 2, 2, 2, 3, 3, 3,
+ 4, 3, 3, 1, 3, 5, 1, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 2, 2, 3, 4,
+ 4, 2, 11, 3, 6, 8, 6, 6, 6, 13,
+ 8, 6, 10, 5, 5, 5, 7, 5, 5, 5,
+ 5, 5, 7, 7, 5, 5, 0, 6, 5, 6,
+ 4, 5, 0, 8, 9, 0, 3, 0, 1, 0,
+ 3, 8, 4, 1, 3, 3, 6, 7, 7, 8,
+ 4, 0, 1, 0, 1, 3, 3, 1, 1, 2,
+ 1, 1, 0, 2, 0, 2, 5, 3, 7, 4,
+ 4, 4, 4, 3, 3, 3, 7, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 2, 0, 2,
+ 2, 1, 3, 2, 0, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 1, 3, 3, 0, 2,
+ 2, 2, 2, 2, 2, 2, 4, 4, 3, 0,
+ 1, 4, 3, 4, 4, 3, 3, 3, 2, 1,
+ 3, 3, 3, 5, 7, 7, 6, 5, 3, 2,
+ 3, 3, 3, 7, 3, 3, 3, 3, 4, 7,
+ 5, 2, 4, 4, 4, 4, 4, 5, 5, 4,
+ 4, 4, 4, 4, 4, 4, 4, 2, 2, 4,
+ 4, 4, 4, 4, 2, 3, 3, 3, 5, 2,
+ 3, 3, 2, 3, 4, 4, 4, 3, 4, 4,
+ 5, 3, 0, 1, 0, 1, 1, 1, 0, 2,
+ 2, 0, 2, 2, 0, 2, 0, 1, 1, 1,
+ 1, 2, 1, 3, 1, 1, 1, 1, 1, 2,
+ 1, 1, 5, 0, 1, 0, 1, 2, 3, 0,
+ 3, 3, 3, 3, 3, 1, 1, 1, 1, 1,
+ 1, 1, 1, 0, 1, 1, 4, 4, 2, 2,
+ 3, 1, 3, 2, 1, 2, 1, 2, 2, 4,
+ 3, 3, 6, 4, 7, 6, 1, 3, 2, 2,
+ 2, 2, 1, 1, 1, 3, 2, 1, 1, 1,
+ 0, 1, 1, 0, 3, 0, 2, 0, 2, 1,
+ 2, 2, 0, 1, 1, 0, 1, 1, 5, 5,
+ 4, 0, 2, 4, 4, 0, 1, 0, 1, 2,
+ 3, 4, 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 1, 2, 3, 5, 0, 1, 2, 1, 1,
+ 0, 1, 2, 1, 3, 1, 1, 1, 4, 3,
+ 1, 1, 2, 3, 7, 0, 3, 0, 1, 1,
+ 3, 1, 3, 1, 1, 3, 3, 1, 3, 4,
+ 4, 4, 3, 2, 4, 0, 1, 0, 2, 0,
+ 1, 0, 1, 2, 1, 1, 1, 2, 2, 1,
+ 2, 3, 2, 3, 2, 2, 2, 1, 1, 3,
+ 3, 0, 1, 1, 2, 6, 5, 6, 6, 0,
+ 2, 3, 3, 0, 2, 3, 3, 3, 2, 3,
+ 1, 6, 3, 4, 3, 1, 3, 4, 5, 6,
+ 3, 4, 5, 6, 3, 4, 1, 1, 1, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 1, 1, 1, 1, 1, 3, 1, 1, 1, 2,
- 2, 2, 2, 1, 1, 2, 7, 7, 6, 6,
- 2, 2, 1, 6, 3, 3, 3, 1, 3, 1,
- 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 2, 2, 2, 2, 2, 1, 1, 0, 1,
- 2, 5, 0, 3, 0, 1, 4, 4, 2, 0,
- 1, 1, 2, 2, 1, 1, 2, 2, 0, 1,
- 1, 1, 1, 5, 1, 3, 0, 3, 1, 1,
- 1, 2, 1, 2, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 3, 4, 6, 4,
- 4, 8, 6, 8, 6, 5, 4, 10, 2, 2,
- 1, 2, 2, 2, 4, 5, 5, 5, 5, 5,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 8, 8, 8, 6, 5, 4, 4, 4, 4, 4,
- 7, 4, 4, 6, 6, 6, 8, 6, 6, 4,
- 4, 3, 4, 6, 6, 4, 4, 4, 6, 8,
- 6, 4, 6, 6, 8, 10, 7, 8, 8, 9,
- 4, 4, 4, 4, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 4, 4, 6, 5, 9, 6,
- 9, 1, 1, 1, 1, 1, 1, 1, 1, 0,
- 2, 6, 8, 10, 12, 14, 6, 8, 8, 10,
- 12, 14, 6, 8, 10, 12, 6, 8, 4, 4,
- 3, 4, 6, 6, 4, 6, 4, 6, 8, 0,
- 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 1, 1, 1, 1, 1, 3, 1, 1, 1,
+ 2, 2, 2, 2, 1, 1, 2, 7, 7, 6,
+ 6, 2, 2, 1, 6, 3, 3, 3, 1, 3,
+ 1, 3, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 1, 1, 0,
+ 1, 2, 5, 0, 3, 0, 1, 4, 4, 2,
+ 0, 1, 1, 2, 2, 1, 1, 2, 2, 0,
+ 1, 1, 1, 1, 5, 1, 3, 0, 3, 1,
+ 1, 1, 2, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 3, 4, 6,
+ 4, 4, 8, 6, 8, 6, 5, 4, 10, 2,
+ 2, 1, 2, 2, 2, 4, 5, 5, 5, 5,
+ 5, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 8, 8, 8, 6, 5, 4, 4, 4, 4,
+ 4, 7, 4, 4, 6, 6, 6, 8, 6, 6,
+ 4, 4, 3, 4, 6, 6, 4, 4, 4, 6,
+ 8, 6, 4, 6, 6, 8, 10, 7, 8, 8,
+ 9, 4, 4, 4, 4, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 4, 4, 6, 5, 9,
+ 6, 9, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 2, 6, 8, 10, 12, 14, 6, 8, 8,
+ 10, 12, 14, 6, 8, 10, 12, 6, 8, 4,
+ 4, 3, 4, 6, 6, 4, 6, 4, 6, 8,
+ 0, 2, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 0, 2, 0, 2, 3, 3, 4, 4,
- 4, 4, 4, 0, 3, 4, 7, 3, 1, 1,
- 1, 0, 5, 5, 2, 3, 1, 2, 2, 1,
- 2, 1, 2, 2, 1, 2, 2, 1, 1, 0,
- 1, 0, 1, 0, 2, 1, 2, 4, 0, 2,
- 1, 1, 3, 5, 1, 1, 1, 2, 2, 0,
- 3, 0, 2, 2, 1, 3, 0, 1, 0, 1,
- 3, 1, 3, 2, 0, 1, 1, 0, 1, 2,
- 4, 4, 0, 2, 2, 1, 1, 3, 3, 3,
- 3, 3, 3, 3, 3, 0, 3, 3, 3, 0,
- 3, 1, 1, 0, 4, 0, 1, 1, 0, 3,
- 1, 3, 2, 1, 1, 0, 1, 2, 4, 9,
- 3, 5, 0, 3, 3, 0, 1, 0, 2, 2,
- 0, 2, 2, 2, 0, 2, 1, 2, 3, 3,
- 0, 2, 1, 2, 3, 4, 3, 0, 1, 2,
- 1, 5, 4, 4, 1, 3, 3, 5, 0, 5,
- 1, 3, 1, 2, 3, 4, 1, 1, 3, 3,
- 1, 3, 3, 3, 3, 3, 1, 1, 2, 1,
- 2, 1, 1, 1, 1, 1, 1, 1, 0, 1,
- 0, 2, 0, 3, 0, 1, 0, 1, 1, 5,
- 0, 1, 0, 1, 2, 1, 1, 1, 1, 1,
- 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 0, 2, 0, 2, 3, 3, 4,
+ 4, 4, 4, 4, 0, 3, 4, 7, 3, 1,
+ 1, 1, 0, 5, 5, 2, 3, 1, 2, 2,
+ 1, 2, 1, 2, 2, 1, 2, 2, 1, 1,
+ 0, 1, 0, 1, 0, 2, 1, 2, 4, 0,
+ 2, 1, 1, 3, 5, 1, 1, 1, 2, 2,
+ 0, 3, 0, 2, 2, 1, 3, 0, 1, 0,
+ 1, 3, 1, 3, 2, 0, 1, 1, 0, 1,
+ 2, 4, 4, 0, 2, 2, 1, 1, 3, 3,
+ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3,
+ 0, 3, 1, 1, 0, 4, 0, 1, 1, 0,
+ 3, 1, 3, 2, 1, 1, 0, 1, 2, 4,
+ 9, 3, 5, 0, 3, 3, 0, 1, 0, 2,
+ 2, 0, 2, 2, 2, 0, 2, 1, 2, 3,
+ 3, 0, 2, 1, 2, 3, 4, 3, 0, 1,
+ 2, 1, 5, 4, 4, 1, 3, 3, 5, 0,
+ 5, 1, 3, 1, 2, 3, 4, 1, 1, 3,
+ 3, 1, 3, 3, 3, 3, 3, 1, 1, 2,
+ 1, 2, 1, 1, 1, 1, 1, 1, 1, 0,
+ 1, 0, 2, 0, 3, 0, 1, 0, 1, 1,
+ 5, 0, 1, 0, 1, 2, 1, 1, 1, 1,
+ 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -7789,8 +7710,8 @@ var yyR2 = [...]int{
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
- 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
+ 0, 1, 1,
}
var yyChk = [...]int{
@@ -7928,7 +7849,7 @@ var yyChk = [...]int{
145, 159, -241, 140, 163, 164, 165, 166, 167, 168,
169, 171, 170, 172, 173, 160, 161, 176, 223, 224,
-150, -150, -150, -150, -211, -217, -216, -406, -213, -378,
- -288, -295, -406, -406, -150, -273, -406, -406, -150, -406,
+ -288, -295, -406, -406, -150, -273, -406, -406, -146, -406,
-406, -406, -220, -140, -406, -406, -410, -406, -410, -410,
-324, -406, -324, -406, -406, -406, -406, -406, -406, -406,
-406, -406, -406, -406, -406, -406, -406, -406, -406, -406,
@@ -8133,338 +8054,338 @@ var yyChk = [...]int{
-140, -140, 88, -407, -407, -407, -103, 88, -100, -99,
-289, 77, 122, -262, -289, 89, -407, -406, -230, 89,
-234, -11, 87, -3, 272, -321, -370, -322, -323, -5,
- -6, -346, -79, 505, -372, -350, -291, 90, 96, 89,
- 505, -407, -407, -86, 145, 624, 594, -141, -152, -149,
- 220, -407, 88, -407, 88, -407, 88, -289, 243, -101,
- 88, 26, -298, -172, -170, -289, 558, -390, -389, 501,
- -400, -396, 119, 143, 101, -398, 596, 552, 128, 129,
- -79, -140, 87, -407, -80, 287, 611, -381, 506, -86,
- 625, 572, 547, 572, 547, -140, -140, -140, -99, -406,
- -407, 88, 23, -313, -60, 569, -387, -388, 77, -391,
- 379, 568, 589, 119, 90, 89, -250, 248, -374, 507,
- 142, -407, 88, -407, 88, -407, -89, -170, 565, -326,
- -154, -388, 77, -387, 77, 14, 13, -4, 655, 89,
- 289, -86, -140, -140, -407, -59, 27, -171, -386, 256,
- 251, 254, 33, -386, 96, -4, -407, -407, 569, 250,
- 32, 119, -154, -174, -173, -173,
+ -6, -346, -79, 505, -372, -350, -295, -291, 90, 96,
+ 89, 505, -407, -407, -86, 145, 624, 594, -141, -152,
+ -149, 220, -407, 88, -407, 88, -407, 88, -289, 243,
+ -101, 88, 26, -298, -172, -170, -289, 558, -390, -389,
+ 501, -400, -396, 119, 143, 101, -398, 596, 552, 128,
+ 129, -79, -140, 87, -407, -80, 287, 611, 221, -381,
+ 506, -86, 625, 572, 547, 572, 547, -140, -140, -140,
+ -99, -406, -407, 88, 23, -313, -60, 569, -387, -388,
+ 77, -391, 379, 568, 589, 119, 90, 89, -250, 248,
+ -296, -374, 507, 142, -407, 88, -407, 88, -407, -89,
+ -170, 565, -326, -154, -388, 77, -387, 77, 14, 13,
+ -4, 655, 89, 289, -86, -140, -140, -407, -59, 27,
+ -171, -386, 256, 251, 254, 33, -386, 96, -4, -407,
+ -407, 569, 250, 32, 119, -154, -174, -173, -173,
}
var yyDef = [...]int{
- 844, -2, -2, 846, 2, 4, 5, 6, 7, 8,
+ 845, -2, -2, 847, 2, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 69, 71,
- 72, 844, 844, 844, 0, 844, 0, 0, 844, -2,
- -2, 844, 1461, 0, 844, 0, 0, -2, 771, 777,
- 0, 779, -2, 0, 0, 844, 2008, 2008, 839, 0,
- 0, 0, 0, 0, 844, 844, 844, 844, 1318, 49,
- 844, 0, 84, 85, 795, 796, 797, 64, 0, 2006,
- 845, 1, 3, 70, 74, 0, 0, 0, 57, 1327,
- 0, 77, 0, 0, 848, 0, 0, 1444, 844, 844,
+ 72, 845, 845, 845, 0, 845, 0, 0, 845, -2,
+ -2, 845, 1462, 0, 845, 0, 0, -2, 772, 778,
+ 0, 780, -2, 0, 0, 845, 2009, 2009, 840, 0,
+ 0, 0, 0, 0, 845, 845, 845, 845, 1319, 49,
+ 845, 0, 84, 85, 796, 797, 798, 64, 0, 2007,
+ 846, 1, 3, 70, 74, 0, 0, 0, 57, 1328,
+ 0, 77, 0, 0, 849, 0, 0, 1445, 845, 845,
0, 116, 117, 0, 0, 0, -2, 120, -2, 149,
- 150, 151, 0, 156, 585, 508, 560, 506, 545, -2,
- 494, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 511, 384, 384, 0, 0, -2, 494,
- 494, 494, 1446, 0, 0, 0, 542, 446, 384, 384,
+ 150, 151, 0, 156, 586, 509, 561, 507, 546, -2,
+ 495, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 512, 384, 384, 0, 0, -2, 495,
+ 495, 495, 1447, 0, 0, 0, 543, 446, 384, 384,
384, 0, 384, 384, 384, 384, 0, 0, 384, 384,
384, 384, 384, 384, 384, 384, 384, 384, 384, 384,
- 384, 384, 384, 384, 384, 1345, 155, 1462, 1459, 1460,
- 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623,
- 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633,
- 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643,
- 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653,
- 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663,
- 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673,
- 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683,
- 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693,
- 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703,
- 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713,
- 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723,
- 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733,
- 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743,
- 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753,
- 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763,
- 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773,
- 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783,
- 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793,
- 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803,
- 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813,
- 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823,
- 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833,
- 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843,
- 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853,
- 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863,
- 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873,
- 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883,
- 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893,
- 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903,
- 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913,
- 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923,
- 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933,
- 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943,
- 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953,
- 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963,
- 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973,
- 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983,
- 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993,
- 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
- 2004, 2005, 0, 1438, 0, 698, 947, 0, 760, 760,
- 0, 760, 760, 760, 760, 0, 0, 0, 710, 0,
- 0, 0, 0, 757, 0, 726, 727, 0, 757, 0,
- 733, 763, 0, 738, 760, 760, 741, 2009, 0, 2009,
- 2009, 1429, 0, 754, 752, 766, 767, 39, 770, 773,
- 774, 775, 776, 778, 0, 783, 786, 1455, 1456, 0,
- 788, 807, 808, 0, 840, 841, 44, 1095, 0, 969,
- 974, 985, 1000, 1001, 1002, 1003, 1004, 1006, 1007, 1008,
- 0, 0, 0, 0, 1013, 1014, 0, 0, 0, 0,
- 0, 1076, 1022, 0, 0, 0, 0, 1291, 0, 0,
- 1252, 1252, 1110, 1252, 1254, 1254, 1662, 1798, 1806, 1923,
- 1625, 1630, 1631, 1632, 1916, 1917, 1918, 1919, 1957, 1958,
- 1962, 1722, 0, 0, 0, 2005, 1759, 1767, 1768, 1792,
- 1889, 1943, 1642, 1787, 1855, 1719, 1741, 1742, 1871, 1872,
- 1763, 1764, 1745, 1757, 1760, 1748, 1749, 1751, 1753, 1758,
- 1765, 1771, 1750, 1770, 1769, 0, 1746, 1747, 1752, 1762,
- 1766, 1754, 1755, 1756, 1761, 1772, 0, 0, 0, 0,
- 0, 1191, 1192, 1193, 1194, 0, 0, 0, 0, 0,
- 0, 0, 280, 281, 1304, 1305, 42, 43, 1094, 1416,
- 1254, 1254, 1254, 1254, 1254, 1036, 1037, 1038, 1039, 1040,
- 1064, 1065, 1071, 1072, 1866, 1867, 1868, 1869, 1703, 1952,
- 1711, 1712, 1850, 1851, 1724, 1725, 1980, 1981, -2, -2,
+ 384, 384, 384, 384, 384, 1346, 155, 1463, 1460, 1461,
+ 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624,
+ 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634,
+ 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644,
+ 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654,
+ 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664,
+ 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674,
+ 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684,
+ 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694,
+ 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704,
+ 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714,
+ 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724,
+ 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734,
+ 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744,
+ 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754,
+ 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764,
+ 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774,
+ 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784,
+ 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794,
+ 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804,
+ 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814,
+ 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824,
+ 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834,
+ 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844,
+ 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854,
+ 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864,
+ 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874,
+ 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884,
+ 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894,
+ 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904,
+ 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914,
+ 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924,
+ 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934,
+ 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944,
+ 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954,
+ 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964,
+ 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974,
+ 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984,
+ 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994,
+ 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+ 2005, 2006, 0, 1439, 0, 699, 948, 0, 761, 761,
+ 0, 761, 761, 761, 761, 0, 0, 0, 711, 0,
+ 0, 0, 0, 758, 0, 727, 728, 0, 758, 0,
+ 734, 764, 0, 739, 761, 761, 742, 2010, 0, 2010,
+ 2010, 1430, 0, 755, 753, 767, 768, 39, 771, 774,
+ 775, 776, 777, 779, 0, 784, 787, 1456, 1457, 0,
+ 789, 808, 809, 0, 841, 842, 44, 1096, 0, 970,
+ 975, 986, 1001, 1002, 1003, 1004, 1005, 1007, 1008, 1009,
+ 0, 0, 0, 0, 1014, 1015, 0, 0, 0, 0,
+ 0, 1077, 1023, 0, 0, 0, 0, 1292, 0, 0,
+ 1253, 1253, 1111, 1253, 1255, 1255, 1663, 1799, 1807, 1924,
+ 1626, 1631, 1632, 1633, 1917, 1918, 1919, 1920, 1958, 1959,
+ 1963, 1723, 0, 0, 0, 2006, 1760, 1768, 1769, 1793,
+ 1890, 1944, 1643, 1788, 1856, 1720, 1742, 1743, 1872, 1873,
+ 1764, 1765, 1746, 1758, 1761, 1749, 1750, 1752, 1754, 1759,
+ 1766, 1772, 1751, 1771, 1770, 0, 1747, 1748, 1753, 1763,
+ 1767, 1755, 1756, 1757, 1762, 1773, 0, 0, 0, 0,
+ 0, 1192, 1193, 1194, 1195, 0, 0, 0, 0, 0,
+ 0, 0, 280, 281, 1305, 1306, 42, 43, 1095, 1417,
+ 1255, 1255, 1255, 1255, 1255, 1037, 1038, 1039, 1040, 1041,
+ 1065, 1066, 1072, 1073, 1867, 1868, 1869, 1870, 1704, 1953,
+ 1712, 1713, 1851, 1852, 1725, 1726, 1981, 1982, -2, -2,
-2, 221, 222, 223, 224, 225, 226, 227, 228, 0,
- 1666, 1934, 1935, 217, 0, 0, 285, 286, 282, 283,
- 284, 1078, 1079, 238, 239, 240, 241, 242, 243, 244,
+ 1667, 1935, 1936, 217, 0, 0, 285, 286, 282, 283,
+ 284, 1079, 1080, 238, 239, 240, 241, 242, 243, 244,
245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
- 275, 276, 277, 278, 279, 2008, 0, 817, 0, 0,
- 0, 0, 0, 1327, 0, 1319, 1318, 62, 0, 844,
- -2, 0, 0, 0, 0, 46, 0, 51, 904, 847,
- 76, 75, 1367, 0, 0, 0, 58, 1328, 66, 68,
- 1329, 0, 849, 850, 0, 880, 884, 0, 0, 0,
- 1445, 1444, 1444, 101, 0, 0, 1420, 113, 114, 115,
- 0, 0, 1426, 1427, 1431, 1432, 0, 0, 167, 168,
- 0, 40, 411, 0, 163, 0, 404, 345, 0, 1345,
- 0, 0, 0, 0, 0, 844, 0, 1439, 144, 145,
- 152, 153, 154, 384, 384, 384, 557, 0, 0, 155,
- 155, 515, 516, 517, 0, 0, -2, 409, 0, 495,
+ 275, 276, 277, 278, 279, 2009, 0, 818, 0, 0,
+ 0, 0, 0, 1328, 0, 1320, 1319, 62, 0, 845,
+ -2, 0, 0, 0, 0, 46, 0, 51, 905, 848,
+ 76, 75, 1368, 0, 0, 0, 58, 1329, 66, 68,
+ 1330, 0, 850, 851, 0, 881, 885, 0, 0, 0,
+ 1446, 1445, 1445, 101, 0, 0, 1421, 113, 114, 115,
+ 0, 0, 1427, 1428, 1432, 1433, 0, 0, 167, 168,
+ 0, 40, 411, 0, 163, 0, 404, 345, 0, 1346,
+ 0, 0, 0, 0, 0, 845, 0, 1440, 144, 145,
+ 152, 153, 154, 384, 384, 384, 558, 0, 0, 155,
+ 155, 516, 517, 518, 0, 0, -2, 409, 0, 496,
0, 0, 398, 398, 402, 400, 401, 0, 0, 0,
- 0, 0, 0, 0, 0, 534, 0, 535, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 646, 0, 385,
- 0, 555, 556, 447, 0, 0, 0, 0, 0, 0,
- 0, 0, 1447, 1448, 0, 532, 533, 0, 0, 0,
+ 0, 0, 0, 0, 0, 535, 0, 536, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 647, 0, 385,
+ 0, 556, 557, 447, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1448, 1449, 0, 533, 534, 0, 0, 0,
384, 384, 0, 0, 0, 0, 384, 384, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 143, 1358, 0, 0,
- 0, -2, 0, 690, 0, 0, 0, 1440, 1440, 0,
- 697, 0, 699, 700, 0, 0, 701, 0, 757, 757,
- 755, 756, 703, 704, 705, 706, 760, 0, 0, 393,
- 394, 395, 757, 760, 0, 760, 760, 760, 760, 757,
- 757, 757, 760, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 2009, 763, 760, 0, 734, 0, 735, 736,
- 739, 740, 742, 2010, 2011, 1457, 1458, 1465, 1466, 1467,
- 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477,
- 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487,
- 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497,
- 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507,
- 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517,
- 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527,
- 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537,
- 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547,
- 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557,
- 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567,
- 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577,
- 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587,
- 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597,
- 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607,
- 1608, 1609, 1610, 1611, 1612, 1613, 2009, 2009, 746, 750,
- 1430, 772, 784, 787, 802, 48, 1710, 794, 819, 820,
- 825, 0, 0, 0, 0, 831, 832, 833, 0, 0,
- 836, 837, 838, 0, 0, 0, 0, 0, 967, 0,
- 0, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 986, 987, 0, 0, 0,
- 1009, 1010, 1011, 1012, 1015, 0, 1027, 0, 1029, 1300,
- -2, 0, 0, 0, 1020, 1021, 0, 0, 0, 0,
- 0, 0, 0, 1292, 0, 0, 1108, 0, 1109, 1111,
- 1112, 0, 1113, 854, 854, 854, 854, 854, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 854, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1450,
+ 0, 0, 0, 0, 0, 0, 143, 1359, 0, 0,
+ 0, -2, 0, 691, 0, 0, 0, 1441, 1441, 0,
+ 698, 0, 700, 701, 0, 0, 702, 0, 758, 758,
+ 756, 757, 704, 705, 706, 707, 761, 0, 0, 393,
+ 394, 395, 758, 761, 0, 761, 761, 761, 761, 758,
+ 758, 758, 761, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 2010, 764, 761, 0, 735, 0, 736, 737,
+ 740, 741, 743, 2011, 2012, 1458, 1459, 1466, 1467, 1468,
+ 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478,
+ 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488,
+ 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498,
+ 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508,
+ 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518,
+ 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528,
+ 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538,
+ 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548,
+ 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558,
+ 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568,
+ 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578,
+ 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588,
+ 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598,
+ 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608,
+ 1609, 1610, 1611, 1612, 1613, 1614, 2010, 2010, 747, 751,
+ 1431, 773, 785, 788, 803, 48, 1711, 795, 820, 821,
+ 826, 0, 0, 0, 0, 832, 833, 834, 0, 0,
+ 837, 838, 839, 0, 0, 0, 0, 0, 968, 0,
+ 0, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 987, 988, 0, 0, 0,
+ 1010, 1011, 1012, 1013, 1016, 0, 1028, 0, 1030, 1301,
+ -2, 0, 0, 0, 1021, 1022, 0, 0, 0, 0,
+ 0, 0, 0, 1293, 0, 0, 1109, 0, 1110, 1112,
+ 1113, 0, 1114, 855, 855, 855, 855, 855, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 855, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1451,
131, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 864, 0,
- 0, 864, 864, 0, 0, 210, 211, 212, 213, 214,
+ 0, 0, 0, 0, 0, 0, 0, 0, 865, 0,
+ 0, 865, 865, 0, 0, 210, 211, 212, 213, 214,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 229, 230, 231, 232, 233, 234,
- 287, 235, 236, 237, 1094, 0, 0, 0, 45, 809,
- 810, 0, 930, 1450, 0, 0, 860, 0, 56, 65,
- 67, 1327, 60, 1327, 0, 866, 0, 0, -2, -2,
- 867, 873, 874, 875, 876, 877, 53, 2007, 54, 0,
- 73, 0, 47, 0, 0, 0, 0, 357, 1370, 0,
- 0, 1320, 1321, 1324, 0, 881, 1804, 885, 0, 887,
- 888, 0, 0, 99, 0, 946, 0, 0, 0, 0,
- 1428, 103, 104, 0, 0, 0, 368, 1433, 1434, 1435,
+ 287, 235, 236, 237, 1095, 0, 0, 0, 45, 810,
+ 811, 0, 931, 1451, 0, 0, 861, 0, 56, 65,
+ 67, 1328, 60, 1328, 0, 867, 0, 0, -2, -2,
+ 868, 874, 875, 876, 877, 878, 53, 2008, 54, 0,
+ 73, 0, 47, 0, 0, 0, 0, 357, 1371, 0,
+ 0, 1321, 1322, 1325, 0, 882, 1805, 886, 0, 888,
+ 889, 0, 0, 99, 0, 947, 0, 0, 0, 0,
+ 1429, 103, 104, 0, 0, 0, 368, 1434, 1435, 1436,
-2, 391, 0, 368, 352, 295, 296, 297, 345, 299,
345, 345, 345, 345, 357, 357, 357, 357, 328, 329,
330, 331, 332, 0, 0, 314, 345, 345, 345, 345,
335, 336, 337, 338, 339, 340, 341, 342, 300, 301,
302, 303, 304, 305, 306, 307, 308, 347, 347, 347,
- 349, 349, 0, 41, 0, 372, 0, 1324, 0, 0,
- 1358, 1442, 1452, 0, 0, 0, 1442, 122, 0, 0,
- 0, 558, 596, 509, 546, 559, 0, 512, 513, -2,
- 0, 0, 494, 0, 496, 0, 392, 0, -2, 0,
- 402, 0, 398, 402, 399, 402, 390, 403, 536, 537,
- 538, 0, 540, 541, 626, 916, 0, 0, 0, 0,
- 0, 632, 633, 634, 0, 636, 637, 638, 639, 640,
- 641, 642, 643, 644, 645, 547, 548, 549, 550, 551,
- 552, 553, 554, 0, 0, 0, 0, 496, 0, 543,
+ 349, 349, 0, 41, 0, 372, 0, 1325, 0, 0,
+ 1359, 1443, 1453, 0, 0, 0, 1443, 122, 0, 0,
+ 0, 559, 597, 510, 547, 560, 0, 513, 514, -2,
+ 0, 0, 495, 0, 497, 0, 392, 0, -2, 0,
+ 402, 0, 398, 402, 399, 402, 390, 403, 537, 538,
+ 539, 0, 541, 542, 627, 917, 0, 0, 0, 0,
+ 0, 633, 634, 635, 0, 637, 638, 639, 640, 641,
+ 642, 643, 644, 645, 646, 548, 549, 550, 551, 552,
+ 553, 554, 555, 0, 0, 0, 0, 497, 0, 544,
0, 0, 448, 449, 450, 0, 0, 453, 454, 455,
- 456, 0, 0, 459, 460, 461, 933, 934, 462, 463,
+ 456, 0, 0, 459, 460, 461, 934, 935, 462, 463,
488, 489, 490, 464, 465, 466, 467, 468, 469, 470,
482, 483, 484, 485, 486, 487, 471, 472, 473, 474,
- 475, 476, 479, 0, 137, 1349, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1440, 0, 0, 0, 0, 863, 948, 1463, 1464,
- 761, 762, 0, 396, 397, 760, 760, 707, 747, 0,
- 760, 711, 748, 712, 714, 713, 715, 728, 729, 760,
- 718, 758, 759, 719, 720, 721, 722, 723, 724, 725,
- 743, 730, 731, 732, 764, 0, 768, 769, 744, 745,
- 0, 785, 805, 803, 804, 806, 798, 799, 800, 801,
- 0, 0, 0, 822, 95, 827, 828, 829, 830, 842,
- 835, 1096, 964, 965, 966, 0, 968, 971, 0, 1080,
- 1082, 973, 975, 1091, 1092, 1093, 0, 0, 0, 0,
- 0, 979, 983, 988, 989, 990, 991, 992, 993, 994,
- 995, 996, 997, 998, 999, 1005, 1268, 1269, 1270, 1024,
- 288, 289, 0, 1025, 0, 0, 0, 0, 0, 0,
- 0, 1095, 1026, 0, 878, 0, 0, 1298, 1295, 0,
- 0, 0, 1253, 1255, 0, 0, 0, 0, 855, 856,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 1231, 1232,
- 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242,
- 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1271,
- 0, 0, 0, 0, 0, 1291, 0, 1031, 1032, 1033,
- 0, 0, 0, 0, 0, 0, 1151, 0, 0, 0,
- 0, 1451, 0, 132, 133, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1195, 1196, 1197, 1198, 38, 0, 0, 0, 865,
- 1302, 0, -2, -2, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1220, 0, 0,
- 0, 0, 0, 0, 1414, 0, 0, 812, 813, 815,
- 0, 950, 0, 931, 0, 0, 818, 0, 859, 0,
- 862, 59, 61, 871, 872, 0, 889, 868, 55, 50,
- 0, 0, 908, 1368, 357, 1390, 0, 366, 366, 363,
- 1330, 1331, 0, 1323, 1325, 1326, 78, 886, 882, 0,
- 962, 0, 0, 945, 0, 892, 894, 895, 896, 928,
- 0, 899, 900, 0, 0, 0, 0, 0, 97, 947,
- 1421, 0, 102, 0, 0, 107, 108, 1422, 1423, 1424,
- 1425, 0, 585, -2, 443, 169, 171, 172, 173, 164,
+ 475, 476, 479, 0, 137, 1350, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1441, 0, 0, 0, 0, 864, 949, 1464, 1465,
+ 762, 763, 0, 396, 397, 761, 761, 708, 748, 0,
+ 761, 712, 749, 713, 715, 714, 716, 729, 730, 761,
+ 719, 759, 760, 720, 721, 722, 723, 724, 725, 726,
+ 744, 731, 732, 733, 765, 0, 769, 770, 745, 746,
+ 0, 786, 806, 804, 805, 807, 799, 800, 801, 802,
+ 0, 0, 0, 823, 95, 828, 829, 830, 831, 843,
+ 836, 1097, 965, 966, 967, 0, 969, 972, 0, 1081,
+ 1083, 974, 976, 1092, 1093, 1094, 0, 0, 0, 0,
+ 0, 980, 984, 989, 990, 991, 992, 993, 994, 995,
+ 996, 997, 998, 999, 1000, 1006, 1269, 1270, 1271, 1025,
+ 288, 289, 0, 1026, 0, 0, 0, 0, 0, 0,
+ 0, 1096, 1027, 0, 879, 0, 0, 1299, 1296, 0,
+ 0, 0, 1254, 1256, 0, 0, 0, 0, 856, 857,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1232, 1233,
+ 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243,
+ 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1272,
+ 0, 0, 0, 0, 0, 1292, 0, 1032, 1033, 1034,
+ 0, 0, 0, 0, 0, 0, 1152, 0, 0, 0,
+ 0, 1452, 0, 132, 133, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1196, 1197, 1198, 1199, 38, 0, 0, 0, 866,
+ 1303, 0, -2, -2, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1221, 0, 0,
+ 0, 0, 0, 0, 1415, 0, 0, 813, 814, 816,
+ 0, 951, 0, 932, 0, 0, 819, 0, 860, 0,
+ 863, 59, 61, 872, 873, 0, 890, 869, 55, 50,
+ 0, 0, 909, 1369, 357, 1391, 0, 366, 366, 363,
+ 1331, 1332, 0, 1324, 1326, 1327, 78, 887, 883, 0,
+ 963, 0, 0, 946, 0, 893, 895, 896, 897, 929,
+ 0, 900, 901, 0, 0, 0, 0, 0, 97, 948,
+ 1422, 0, 102, 0, 0, 107, 108, 1423, 1424, 1425,
+ 1426, 0, 586, -2, 443, 169, 171, 172, 173, 164,
-2, 355, 353, 354, 298, 357, 357, 322, 323, 324,
325, 326, 327, 0, 0, 315, 316, 317, 318, 309,
- 0, 310, 311, 312, 0, 313, 410, 0, 1332, 373,
+ 0, 310, 311, 312, 0, 313, 410, 0, 1333, 373,
374, 376, 384, 0, 379, 380, 0, 384, 384, 0,
- 405, 406, 0, 1324, 1349, 0, 0, 0, 1453, 1452,
- 1452, 1452, 0, 157, 158, 159, 160, 161, 162, 621,
- 0, 0, 597, 619, 620, 155, 0, 0, 165, 498,
- 497, 0, 653, 0, 408, 0, 0, 402, 402, 387,
- 388, 539, 0, 0, 628, 629, 630, 631, 0, 0,
- 0, 525, 437, 0, 526, 527, 496, 498, 0, 0,
+ 405, 406, 0, 1325, 1350, 0, 0, 0, 1454, 1453,
+ 1453, 1453, 0, 157, 158, 159, 160, 161, 162, 622,
+ 0, 0, 598, 620, 621, 155, 0, 0, 165, 499,
+ 498, 0, 654, 0, 408, 0, 0, 402, 402, 387,
+ 388, 540, 0, 0, 629, 630, 631, 632, 0, 0,
+ 0, 526, 437, 0, 527, 528, 497, 499, 0, 0,
368, 451, 452, 457, 458, 477, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 572, 573, 574,
- 577, 579, 500, 583, 576, 578, 580, 500, 584, 1346,
- 1347, 1348, 0, 0, 691, 0, 0, 434, 93, 1441,
- 696, 757, 717, 749, 757, 709, 716, 737, 781, 789,
- 790, 791, 792, 793, 826, 0, 0, 0, 0, 834,
- 0, 0, 972, 1081, 1083, 976, 0, 980, 984, 0,
- 0, 0, 1030, 1028, 1302, 0, 0, 0, 1077, 0,
- 0, 1099, 1100, 0, 0, 0, 1296, 0, 0, 1106,
- 0, 1256, 1257, 1114, 0, 0, 0, 0, 0, 1120,
- 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1318,
- 0, 0, 0, 0, 0, 1135, 1136, 1137, 1138, 1139,
- 0, 1141, 0, 1142, 0, 0, 0, 0, 1149, 1150,
- 1152, 0, 0, 1155, 1156, 0, 0, 1157, 0, 0,
- 0, 1161, 0, 0, 0, 0, 1170, 1171, 1172, 1173,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1184, 1185, 0, 1059, 0, 0, 1059, 0, 1097, 864,
- 0, 1258, 1259, 1260, 1261, 1262, 0, 0, 0, 0,
- 0, 0, 1218, 1219, 1221, 0, 0, 1224, 0, 1226,
- 0, 1415, 811, 814, 816, 902, 951, 952, 0, 0,
- 0, 0, 932, 1449, 857, 858, 861, 910, 0, 1306,
- 0, 0, 889, 962, 890, 0, 869, 52, 905, 0,
- 1372, 1371, 1384, 1397, 366, 366, 360, 361, 367, 362,
- 364, 365, 1322, 0, 1327, 0, 1408, 0, 0, 1400,
- 0, 0, 0, 0, 0, 0, 0, 0, 935, 0,
- 0, 938, 0, 0, 0, 0, 929, 900, 0, 901,
+ 0, 0, 0, 0, 0, 0, 0, 573, 574, 575,
+ 578, 580, 501, 584, 577, 579, 581, 501, 585, 1347,
+ 1348, 1349, 0, 0, 692, 0, 0, 434, 93, 1442,
+ 697, 758, 718, 750, 758, 710, 717, 738, 782, 790,
+ 791, 792, 793, 794, 827, 0, 0, 0, 0, 835,
+ 0, 0, 973, 1082, 1084, 977, 0, 981, 985, 0,
+ 0, 0, 1031, 1029, 1303, 0, 0, 0, 1078, 0,
+ 0, 1100, 1101, 0, 0, 0, 1297, 0, 0, 1107,
+ 0, 1257, 1258, 1115, 0, 0, 0, 0, 0, 1121,
+ 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1319,
+ 0, 0, 0, 0, 0, 1136, 1137, 1138, 1139, 1140,
+ 0, 1142, 0, 1143, 0, 0, 0, 0, 1150, 1151,
+ 1153, 0, 0, 1156, 1157, 0, 0, 1158, 0, 0,
+ 0, 1162, 0, 0, 0, 0, 1171, 1172, 1173, 1174,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1185, 1186, 0, 1060, 0, 0, 1060, 0, 1098, 865,
+ 0, 1259, 1260, 1261, 1262, 1263, 0, 0, 0, 0,
+ 0, 0, 1219, 1220, 1222, 0, 0, 1225, 0, 1227,
+ 0, 1416, 812, 815, 817, 903, 952, 953, 0, 0,
+ 0, 0, 933, 1450, 858, 859, 862, 911, 0, 1307,
+ 0, 0, 890, 963, 891, 0, 870, 52, 906, 0,
+ 1373, 1372, 1385, 1398, 366, 366, 360, 361, 367, 362,
+ 364, 365, 1323, 0, 1328, 0, 1409, 0, 0, 1401,
+ 0, 0, 0, 0, 0, 0, 0, 0, 936, 0,
+ 0, 939, 0, 0, 0, 0, 930, 901, 0, 902,
0, -2, 0, 0, 91, 92, 0, 0, 0, 105,
106, 0, 0, 112, 369, 370, 146, 155, 445, 170,
418, 0, 0, 294, 356, 319, 320, 321, 0, 343,
- 0, 0, 0, 439, 118, 1336, 1335, 384, 384, 375,
- 0, 378, 0, 0, 0, 1454, 346, 407, 0, 136,
- 0, 0, 0, 0, 0, 142, 591, 0, 0, 598,
- 0, 0, 0, 507, 0, 518, 519, 0, 625, -2,
- 687, 372, 0, 386, 389, 917, 0, 0, 520, 0,
- 523, 524, 438, 498, 529, 530, 544, 531, 480, 481,
- 478, 0, 0, 1359, 1360, 1365, 1363, 1364, 123, 565,
- 567, 566, 570, 0, 0, 0, 502, 0, 502, 563,
- 0, 434, 1332, 0, 695, 435, 436, 760, 760, 821,
- 96, 0, 824, 0, 0, 0, 0, 977, 981, 1263,
- 1289, 345, 345, 1276, 345, 349, 1279, 345, 1281, 345,
- 1284, 345, 1287, 1288, 0, 0, 0, 879, 0, 0,
- 1105, 1299, 0, 0, 1115, 1116, 1117, 1118, 1119, 1293,
- 0, 0, 0, 1134, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 439, 118, 1337, 1336, 384, 384, 375,
+ 0, 378, 0, 0, 0, 1455, 346, 407, 0, 136,
+ 0, 0, 0, 0, 0, 142, 592, 0, 0, 599,
+ 0, 0, 0, 508, 0, 519, 520, 0, 626, -2,
+ 688, 372, 0, 386, 389, 918, 0, 0, 521, 0,
+ 524, 525, 438, 499, 530, 531, 545, 532, 480, 481,
+ 478, 0, 0, 1360, 1361, 1366, 1364, 1365, 123, 566,
+ 568, 567, 571, 0, 0, 0, 503, 0, 503, 564,
+ 0, 434, 1333, 0, 696, 435, 436, 761, 761, 822,
+ 96, 0, 825, 0, 0, 0, 0, 978, 982, 1264,
+ 1290, 345, 345, 1277, 345, 349, 1280, 345, 1282, 345,
+ 1285, 345, 1288, 1289, 0, 0, 0, 880, 0, 0,
+ 1106, 1300, 0, 0, 1116, 1117, 1118, 1119, 1120, 1294,
+ 0, 0, 0, 1135, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 134, 135, 0, 0, 0, 0,
- 0, 0, 1229, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1054, 1058, 0, 1060, 1061, 0, 0,
- 1187, 0, 0, 1199, 0, 1303, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 953, 958, 958, 958,
- 0, 0, 0, 1436, 1437, 1307, 1308, 962, 1309, 891,
- 870, 909, 1390, 0, 1383, 0, -2, 1392, 0, 0,
- 0, 1398, 358, 359, 883, 79, 963, 82, 0, 1408,
- 1417, 0, 1399, 1410, 1412, 0, 0, 0, 1404, 0,
- 962, 893, 924, 926, 0, 921, 936, 937, 939, 0,
- 941, 0, 943, 944, 904, 898, 0, 99, 0, 962,
- 962, 98, 0, 949, 109, 110, 111, 444, 174, 179,
+ 0, 0, 1230, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1055, 1059, 0, 1061, 1062, 0, 0,
+ 1188, 0, 0, 1200, 0, 1304, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 954, 959, 959, 959,
+ 0, 0, 0, 1437, 1438, 1308, 1309, 963, 1310, 892,
+ 871, 910, 1391, 0, 1384, 0, -2, 1393, 0, 0,
+ 0, 1399, 358, 359, 884, 79, 964, 82, 0, 1409,
+ 1418, 0, 1400, 1411, 1413, 0, 0, 0, 1405, 0,
+ 963, 894, 925, 927, 0, 922, 937, 938, 940, 0,
+ 942, 0, 944, 945, 905, 899, 0, 99, 0, 963,
+ 963, 98, 0, 950, 109, 110, 111, 444, 174, 179,
0, 0, 0, 184, 0, 186, 0, 0, 0, 191,
192, 384, 384, 419, 0, 291, 293, 0, 0, 177,
- 357, 0, 357, 0, 350, 0, 420, 440, 1333, 1334,
- 0, 0, 377, 381, 382, 383, 0, 1443, 138, 0,
- 0, 0, 594, 0, 622, 0, 0, 0, 0, 0,
- 0, 166, 499, 654, 655, 656, 657, 658, 659, 660,
- 661, 662, 0, 384, 0, 0, 0, 384, 384, 384,
- 0, 679, 371, 0, 0, 650, 647, 521, 0, 215,
- 216, 218, 0, 0, 0, 0, 0, 528, 904, 1350,
- 1351, 1352, 0, 1362, 1366, 126, 0, 0, 0, 0,
- 575, 581, 0, 501, 582, 692, 693, 694, 94, 702,
- 708, 823, 843, 970, 978, 982, 0, 0, 0, 0,
- 1290, 1274, 357, 1277, 1278, 1280, 1282, 1283, 1285, 1286,
- 1018, 1019, 1023, 0, 1102, 0, 1104, 1297, 0, 1327,
- 0, 0, 0, 1133, 0, 0, 0, 1144, 1143, 1145,
- 0, 1147, 1148, 1153, 1154, 1158, 0, 1160, 1162, 1163,
- 0, 0, 0, 1174, 1175, 1176, 1177, 1178, 1179, 1180,
- 1181, 1182, 1183, 0, 1052, 1055, 1186, 1062, 1063, 1068,
- 1189, 0, 0, 1098, 1201, 0, 1206, 0, 0, 1212,
- 0, 1216, 0, 1222, 1223, 1225, 1227, 0, 0, 0,
- 0, 0, 930, 911, 63, 1309, 1311, 0, 1377, 1375,
- 1375, 1385, 1386, 0, 0, 1393, 0, 0, 0, 0,
- 83, 0, 0, 0, 1413, 0, 0, 0, 0, 100,
- 1318, 918, 925, 0, 0, 919, 0, 920, 940, 942,
- 897, 0, 962, 962, 89, 90, 0, 180, 0, 182,
+ 357, 0, 357, 0, 350, 0, 420, 440, 1334, 1335,
+ 0, 0, 377, 381, 382, 383, 0, 1444, 138, 0,
+ 0, 0, 595, 0, 623, 0, 0, 0, 0, 0,
+ 0, 166, 500, 655, 656, 657, 658, 659, 660, 661,
+ 662, 663, 0, 384, 0, 0, 0, 384, 384, 384,
+ 0, 680, 371, 0, 0, 651, 648, 522, 0, 215,
+ 216, 218, 0, 0, 0, 0, 0, 529, 905, 1351,
+ 1352, 1353, 0, 1363, 1367, 126, 0, 0, 0, 0,
+ 576, 582, 0, 502, 583, 693, 694, 695, 94, 703,
+ 709, 824, 844, 971, 979, 983, 0, 0, 0, 0,
+ 1291, 1275, 357, 1278, 1279, 1281, 1283, 1284, 1286, 1287,
+ 1019, 1020, 1024, 0, 1103, 0, 1105, 1298, 0, 1328,
+ 0, 0, 0, 1134, 0, 0, 0, 1145, 1144, 1146,
+ 0, 1148, 1149, 1154, 1155, 1159, 0, 1161, 1163, 1164,
+ 0, 0, 0, 1175, 1176, 1177, 1178, 1179, 1180, 1181,
+ 1182, 1183, 1184, 0, 1053, 1056, 1187, 1063, 1064, 1069,
+ 1190, 0, 0, 1099, 1202, 0, 1207, 0, 0, 1213,
+ 0, 1217, 0, 1223, 1224, 1226, 1228, 0, 0, 0,
+ 0, 0, 931, 912, 63, 1310, 1312, 0, 1378, 1376,
+ 1376, 1386, 1387, 0, 0, 1394, 0, 0, 0, 0,
+ 83, 0, 0, 0, 1414, 0, 0, 0, 0, 100,
+ 1319, 919, 926, 0, 0, 920, 0, 921, 941, 943,
+ 898, 0, 963, 963, 89, 90, 0, 180, 0, 182,
208, 209, 0, 185, 187, 188, 189, 195, 196, 197,
190, 0, 0, 290, 292, 0, 0, 333, 344, 334,
- 0, 0, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344,
- 904, 139, 140, 141, 586, 0, 596, 0, 906, 0,
- 589, 0, 510, 0, 0, 0, 384, 384, 384, 0,
- 0, 0, 0, 664, 0, 0, 627, 0, 635, 0,
- 0, 0, 219, 220, 0, 1361, 564, 0, 124, 125,
- 0, 0, 569, 503, 504, 1016, 0, 0, 0, 1017,
- 1275, 0, 0, 0, 0, 1294, 0, 0, 0, 0,
- 1140, 0, 0, 0, 1166, 0, 0, 0, 616, 617,
- 0, 1230, 1057, 1318, 0, 1059, 1069, 1070, 0, 1059,
- 1200, 0, 0, 0, 0, 0, 0, 0, 959, 0,
- 0, 0, 0, 950, 1311, 1316, 0, 0, 1380, 0,
- 1373, 1376, 1374, 1387, 0, 0, 1394, 0, 1396, 0,
- 1418, 1419, 1411, 0, 1403, 1406, 1402, 1405, 1327, 922,
- 0, 927, 0, 1318, 88, 0, 183, 0, 0, 0,
+ 0, 0, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345,
+ 905, 139, 140, 141, 587, 0, 597, 0, 907, 0,
+ 590, 0, 511, 0, 0, 0, 384, 384, 384, 0,
+ 0, 0, 0, 665, 0, 0, 628, 0, 636, 0,
+ 0, 0, 219, 220, 0, 1362, 565, 0, 124, 125,
+ 0, 0, 570, 504, 505, 1017, 0, 0, 0, 1018,
+ 1276, 0, 0, 0, 0, 1295, 0, 0, 0, 0,
+ 1141, 0, 0, 0, 1167, 0, 0, 0, 617, 618,
+ 0, 1231, 1058, 1319, 0, 1060, 1070, 1071, 0, 1060,
+ 1201, 0, 0, 0, 0, 0, 0, 0, 960, 0,
+ 0, 0, 0, 951, 1312, 1317, 0, 0, 1381, 0,
+ 1374, 1377, 1375, 1388, 0, 0, 1395, 0, 1397, 0,
+ 1419, 1420, 1412, 0, 1404, 1407, 1403, 1406, 1328, 923,
+ 0, 928, 0, 1319, 88, 0, 183, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -8472,37 +8393,37 @@ var yyDef = [...]int{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 193, 194, 0,
- 0, 348, 351, 0, 0, 0, 587, 0, 907, 599,
- 590, 0, 677, 0, 681, 0, 0, 0, 684, 685,
- 686, 663, 0, 667, 412, 651, 648, 649, 522, 0,
- 127, 128, 0, 0, 0, 1264, 0, 1267, 1101, 1103,
- 0, 1130, 1131, 1132, 1272, 1273, 1146, 1159, 1164, 0,
- 1167, 0, 0, 1168, 0, 618, 1048, 0, 0, 1066,
- 1067, 0, 1202, 0, 1207, 1208, 0, 1213, 0, 1217,
- 1228, 0, 955, 912, 913, 960, 961, 0, 0, 903,
- 1316, 81, 1317, 1314, 0, 1312, 1310, 1369, 0, 1378,
- 1379, 1388, 1389, 1395, 0, 1401, 0, 86, 0, 0,
- 0, 1327, 181, 0, 200, 0, 595, 0, 598, 588,
- 675, 676, 0, 688, 680, 682, 683, 665, -2, 1353,
- 0, 0, 0, 571, 1265, 0, 0, 1169, 0, 614,
- 615, 1056, 1049, 0, 1034, 1035, 1053, 1188, 1190, 0,
- 0, 0, 0, 954, 956, 957, 80, 0, 1313, 1074,
- 0, 1381, 1382, 1409, 1407, 923, 930, 0, 87, 425,
- 418, 1353, 0, 0, 0, 668, 669, 670, 671, 672,
- 673, 674, 561, 1355, 129, 130, 491, 492, 493, 123,
- 0, 1107, 1165, 1050, 0, 0, 0, 0, 1046, 1047,
- 0, 1203, 0, 1209, 0, 1214, 0, 914, 915, 1315,
- 0, 0, 600, 0, 602, 0, -2, 413, 426, 0,
- 175, 201, 202, 0, 0, 205, 206, 207, 198, 199,
- 119, 0, 0, 689, 0, 1356, 1357, 126, 0, 0,
- 1041, 1042, 1043, 1044, 1045, 0, 0, 0, 1075, 1054,
- 601, 0, 0, 368, 0, 611, 414, 415, 0, 421,
- 422, 423, 424, 203, 204, 623, 0, 0, 568, 1266,
- 0, 1204, 0, 1210, 0, 1215, 0, 603, 604, 612,
- 0, 416, 0, 417, 0, 0, 0, 592, 0, 623,
- 1354, 1051, 0, 0, 1073, 0, 613, 609, 427, 429,
- 430, 0, 0, 428, 624, 593, 1205, 1211, 0, 431,
- 432, 433, 605, 606, 607, 608,
+ 0, 348, 351, 0, 0, 0, 588, 0, 908, 600,
+ 591, 0, 678, 0, 682, 0, 0, 0, 685, 686,
+ 687, 664, 0, 668, 412, 652, 649, 650, 523, 0,
+ 127, 128, 0, 0, 0, 1265, 0, 1268, 1102, 1104,
+ 0, 1131, 1132, 1133, 1273, 1274, 1147, 1160, 1165, 0,
+ 1168, 0, 0, 1169, 0, 619, 1049, 0, 0, 1067,
+ 1068, 0, 1203, 0, 1208, 1209, 0, 1214, 0, 1218,
+ 1229, 0, 956, 913, 914, 961, 962, 0, 0, 904,
+ 1317, 81, 1318, 1315, 0, 1313, 1311, 1370, 0, 1379,
+ 1380, 1389, 1390, 1396, 0, 1402, 0, 86, 0, 0,
+ 0, 1328, 181, 0, 200, 0, 596, 0, 599, 589,
+ 676, 677, 0, 689, 681, 683, 684, 666, -2, 1354,
+ 0, 0, 0, 572, 1266, 0, 0, 1170, 0, 615,
+ 616, 1057, 1050, 0, 1035, 1036, 1054, 1189, 1191, 0,
+ 0, 0, 0, 955, 957, 958, 80, 0, 1314, 1075,
+ 0, 1382, 1383, 1410, 1408, 924, 931, 0, 87, 425,
+ 418, 1354, 0, 0, 0, 669, 670, 671, 672, 673,
+ 674, 675, 562, 1356, 129, 130, 0, 492, 493, 494,
+ 123, 0, 1108, 1166, 1051, 0, 0, 0, 0, 1047,
+ 1048, 0, 1204, 0, 1210, 0, 1215, 0, 915, 916,
+ 1316, 0, 0, 601, 0, 603, 0, -2, 413, 426,
+ 0, 175, 201, 202, 0, 0, 205, 206, 207, 198,
+ 199, 119, 0, 0, 690, 0, 1357, 1358, 0, 126,
+ 0, 0, 1042, 1043, 1044, 1045, 1046, 0, 0, 0,
+ 1076, 1055, 602, 0, 0, 368, 0, 612, 414, 415,
+ 0, 421, 422, 423, 424, 203, 204, 624, 0, 0,
+ 491, 569, 1267, 0, 1205, 0, 1211, 0, 1216, 0,
+ 604, 605, 613, 0, 416, 0, 417, 0, 0, 0,
+ 593, 0, 624, 1355, 1052, 0, 0, 1074, 0, 614,
+ 610, 427, 429, 430, 0, 0, 428, 625, 594, 1206,
+ 1212, 0, 431, 432, 433, 606, 607, 608, 609,
}
var yyTok1 = [...]int{
@@ -8977,7 +8898,7 @@ yydefault:
case 1:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:598
+//line sql.y:586
{
stmt := yyDollar[2].statementUnion()
// If the statement is empty and we have comments
@@ -8991,58 +8912,58 @@ yydefault:
}
case 2:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:611
+//line sql.y:599
{
}
case 3:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:612
+//line sql.y:600
{
}
case 4:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:616
+//line sql.y:604
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
case 37:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:652
+//line sql.y:640
{
setParseTree(yylex, nil)
}
case 38:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:658
+//line sql.y:646
{
yyLOCAL = NewVariableExpression(yyDollar[1].str, SingleAt)
}
yyVAL.union = yyLOCAL
case 39:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:664
+//line sql.y:652
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
case 40:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:669
+//line sql.y:657
{
yyVAL.identifierCI = NewIdentifierCI("")
}
case 41:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:673
+//line sql.y:661
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
case 42:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:679
+//line sql.y:667
{
yyLOCAL = NewVariableExpression(string(yyDollar[1].str), SingleAt)
}
@@ -9050,7 +8971,7 @@ yydefault:
case 43:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:683
+//line sql.y:671
{
yyLOCAL = NewVariableExpression(string(yyDollar[1].str), DoubleAt)
}
@@ -9058,7 +8979,7 @@ yydefault:
case 44:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:689
+//line sql.y:677
{
yyLOCAL = &OtherAdmin{}
}
@@ -9066,7 +8987,7 @@ yydefault:
case 45:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:695
+//line sql.y:683
{
yyLOCAL = &Load{}
}
@@ -9074,7 +8995,7 @@ yydefault:
case 46:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *With
-//line sql.y:701
+//line sql.y:689
{
yyLOCAL = &With{ctes: yyDollar[2].ctesUnion(), Recursive: false}
}
@@ -9082,7 +9003,7 @@ yydefault:
case 47:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *With
-//line sql.y:705
+//line sql.y:693
{
yyLOCAL = &With{ctes: yyDollar[3].ctesUnion(), Recursive: true}
}
@@ -9090,7 +9011,7 @@ yydefault:
case 48:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *With
-//line sql.y:710
+//line sql.y:698
{
yyLOCAL = nil
}
@@ -9098,14 +9019,14 @@ yydefault:
case 49:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *With
-//line sql.y:714
+//line sql.y:702
{
yyLOCAL = yyDollar[1].withUnion()
}
yyVAL.union = yyLOCAL
case 50:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:720
+//line sql.y:708
{
yySLICE := (*[]*CommonTableExpr)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].cteUnion())
@@ -9113,7 +9034,7 @@ yydefault:
case 51:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*CommonTableExpr
-//line sql.y:724
+//line sql.y:712
{
yyLOCAL = []*CommonTableExpr{yyDollar[1].cteUnion()}
}
@@ -9121,7 +9042,7 @@ yydefault:
case 52:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *CommonTableExpr
-//line sql.y:730
+//line sql.y:718
{
yyLOCAL = &CommonTableExpr{ID: yyDollar[1].identifierCS, Columns: yyDollar[2].columnsUnion(), Subquery: yyDollar[4].subqueryUnion()}
}
@@ -9129,7 +9050,7 @@ yydefault:
case 53:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:736
+//line sql.y:724
{
yyLOCAL = yyDollar[2].selStmtUnion()
}
@@ -9137,7 +9058,7 @@ yydefault:
case 54:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:740
+//line sql.y:728
{
yyLOCAL = yyDollar[2].selStmtUnion()
}
@@ -9145,7 +9066,7 @@ yydefault:
case 55:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:744
+//line sql.y:732
{
setLockInSelect(yyDollar[2].selStmtUnion(), yyDollar[3].lockUnion())
yyLOCAL = yyDollar[2].selStmtUnion()
@@ -9154,7 +9075,7 @@ yydefault:
case 56:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:767
+//line sql.y:755
{
yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion())
yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion())
@@ -9164,7 +9085,7 @@ yydefault:
case 57:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:773
+//line sql.y:761
{
yyDollar[1].selStmtUnion().SetLimit(yyDollar[2].limitUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
@@ -9173,7 +9094,7 @@ yydefault:
case 58:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:778
+//line sql.y:766
{
yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion())
yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion())
@@ -9183,7 +9104,7 @@ yydefault:
case 59:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:784
+//line sql.y:772
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion())
@@ -9194,7 +9115,7 @@ yydefault:
case 60:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:791
+//line sql.y:779
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
yyDollar[2].selStmtUnion().SetLimit(yyDollar[3].limitUnion())
@@ -9204,7 +9125,7 @@ yydefault:
case 61:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:797
+//line sql.y:785
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion())
@@ -9214,14 +9135,14 @@ yydefault:
yyVAL.union = yyLOCAL
case 62:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:804
+//line sql.y:792
{
yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion())
}
case 63:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:808
+//line sql.y:796
{
yyLOCAL = NewSelect(Comments(yyDollar[2].strs), SelectExprs{&Nextval{Expr: yyDollar[5].exprUnion()}}, []string{yyDollar[3].str} /*options*/, nil, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/, nil)
}
@@ -9229,7 +9150,7 @@ yydefault:
case 64:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:814
+//line sql.y:802
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
@@ -9237,7 +9158,7 @@ yydefault:
case 65:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:818
+//line sql.y:806
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
@@ -9245,7 +9166,7 @@ yydefault:
case 66:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:822
+//line sql.y:810
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
@@ -9253,7 +9174,7 @@ yydefault:
case 67:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:826
+//line sql.y:814
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
@@ -9261,7 +9182,7 @@ yydefault:
case 68:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:830
+//line sql.y:818
{
yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()}
}
@@ -9269,7 +9190,7 @@ yydefault:
case 69:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:836
+//line sql.y:824
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
@@ -9277,7 +9198,7 @@ yydefault:
case 70:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:840
+//line sql.y:828
{
setLockInSelect(yyDollar[1].selStmtUnion(), yyDollar[2].lockUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
@@ -9286,7 +9207,7 @@ yydefault:
case 71:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:845
+//line sql.y:833
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
@@ -9294,7 +9215,7 @@ yydefault:
case 72:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:849
+//line sql.y:837
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
@@ -9302,7 +9223,7 @@ yydefault:
case 73:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:855
+//line sql.y:843
{
yyLOCAL = yyDollar[2].selStmtUnion()
}
@@ -9310,7 +9231,7 @@ yydefault:
case 74:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:859
+//line sql.y:847
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
@@ -9319,7 +9240,7 @@ yydefault:
case 75:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:864
+//line sql.y:852
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion())
yyDollar[1].selStmtUnion().SetLock(yyDollar[3].lockUnion())
@@ -9329,7 +9250,7 @@ yydefault:
case 76:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:870
+//line sql.y:858
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[3].selectIntoUnion())
yyDollar[1].selStmtUnion().SetLock(yyDollar[2].lockUnion())
@@ -9339,7 +9260,7 @@ yydefault:
case 77:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:876
+//line sql.y:864
{
yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion())
yyLOCAL = yyDollar[1].selStmtUnion()
@@ -9348,7 +9269,7 @@ yydefault:
case 78:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:883
+//line sql.y:871
{
yyLOCAL = &Stream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName}
}
@@ -9356,7 +9277,7 @@ yydefault:
case 79:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:889
+//line sql.y:877
{
yyLOCAL = &VStream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].exprUnion()), Limit: yyDollar[7].limitUnion()}
}
@@ -9364,7 +9285,7 @@ yydefault:
case 80:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:897
+//line sql.y:885
{
yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].selectIntoUnion() /*into*/, yyDollar[6].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[7].exprUnion()), GroupBy(yyDollar[8].exprsUnion()), NewWhere(HavingClause, yyDollar[9].exprUnion()), yyDollar[10].namedWindowsUnion())
}
@@ -9372,7 +9293,7 @@ yydefault:
case 81:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL SelectStatement
-//line sql.y:901
+//line sql.y:889
{
yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, nil, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), GroupBy(yyDollar[7].exprsUnion()), NewWhere(HavingClause, yyDollar[8].exprUnion()), yyDollar[9].namedWindowsUnion())
}
@@ -9380,7 +9301,7 @@ yydefault:
case 82:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:907
+//line sql.y:895
{
// insert_data returns a *Insert pre-filled with Columns & Values
ins := yyDollar[6].insUnion()
@@ -9396,7 +9317,7 @@ yydefault:
case 83:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:919
+//line sql.y:907
{
cols := make(Columns, 0, len(yyDollar[7].updateExprsUnion()))
vals := make(ValTuple, 0, len(yyDollar[8].updateExprsUnion()))
@@ -9410,7 +9331,7 @@ yydefault:
case 84:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL InsertAction
-//line sql.y:931
+//line sql.y:919
{
yyLOCAL = InsertAct
}
@@ -9418,7 +9339,7 @@ yydefault:
case 85:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL InsertAction
-//line sql.y:935
+//line sql.y:923
{
yyLOCAL = ReplaceAct
}
@@ -9426,7 +9347,7 @@ yydefault:
case 86:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Statement
-//line sql.y:941
+//line sql.y:929
{
yyLOCAL = &Update{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: yyDollar[5].tableExprsUnion(), Exprs: yyDollar[7].updateExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion()), OrderBy: yyDollar[9].orderByUnion(), Limit: yyDollar[10].limitUnion()}
}
@@ -9434,7 +9355,7 @@ yydefault:
case 87:
yyDollar = yyS[yypt-11 : yypt+1]
var yyLOCAL Statement
-//line sql.y:947
+//line sql.y:935
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[6].tableName, As: yyDollar[7].identifierCS}}, Partitions: yyDollar[8].partitionsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion()), OrderBy: yyDollar[10].orderByUnion(), Limit: yyDollar[11].limitUnion()}
}
@@ -9442,7 +9363,7 @@ yydefault:
case 88:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Statement
-//line sql.y:951
+//line sql.y:939
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[6].tableNamesUnion(), TableExprs: yyDollar[8].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion())}
}
@@ -9450,7 +9371,7 @@ yydefault:
case 89:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:955
+//line sql.y:943
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())}
}
@@ -9458,32 +9379,32 @@ yydefault:
case 90:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:959
+//line sql.y:947
{
yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())}
}
yyVAL.union = yyLOCAL
case 91:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:964
+//line sql.y:952
{
}
case 92:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:965
+//line sql.y:953
{
}
case 93:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableNames
-//line sql.y:969
+//line sql.y:957
{
yyLOCAL = TableNames{yyDollar[1].tableName.ToViewName()}
}
yyVAL.union = yyLOCAL
case 94:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:973
+//line sql.y:961
{
yySLICE := (*TableNames)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableName.ToViewName())
@@ -9491,14 +9412,14 @@ yydefault:
case 95:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableNames
-//line sql.y:979
+//line sql.y:967
{
yyLOCAL = TableNames{yyDollar[1].tableName}
}
yyVAL.union = yyLOCAL
case 96:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:983
+//line sql.y:971
{
yySLICE := (*TableNames)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableName)
@@ -9506,14 +9427,14 @@ yydefault:
case 97:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableNames
-//line sql.y:989
+//line sql.y:977
{
yyLOCAL = TableNames{yyDollar[1].tableName}
}
yyVAL.union = yyLOCAL
case 98:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:993
+//line sql.y:981
{
yySLICE := (*TableNames)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableName)
@@ -9521,7 +9442,7 @@ yydefault:
case 99:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Partitions
-//line sql.y:998
+//line sql.y:986
{
yyLOCAL = nil
}
@@ -9529,7 +9450,7 @@ yydefault:
case 100:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Partitions
-//line sql.y:1002
+//line sql.y:990
{
yyLOCAL = yyDollar[3].partitionsUnion()
}
@@ -9537,7 +9458,7 @@ yydefault:
case 101:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1008
+//line sql.y:996
{
yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[3].setExprsUnion())
}
@@ -9545,7 +9466,7 @@ yydefault:
case 102:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1014
+//line sql.y:1002
{
yyLOCAL = &SetTransaction{Comments: Comments(yyDollar[2].strs).Parsed(), Scope: yyDollar[3].scopeUnion(), Characteristics: yyDollar[5].characteristicsUnion()}
}
@@ -9553,7 +9474,7 @@ yydefault:
case 103:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1018
+//line sql.y:1006
{
yyLOCAL = &SetTransaction{Comments: Comments(yyDollar[2].strs).Parsed(), Characteristics: yyDollar[4].characteristicsUnion(), Scope: NoScope}
}
@@ -9561,14 +9482,14 @@ yydefault:
case 104:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []Characteristic
-//line sql.y:1024
+//line sql.y:1012
{
yyLOCAL = []Characteristic{yyDollar[1].characteristicUnion()}
}
yyVAL.union = yyLOCAL
case 105:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1028
+//line sql.y:1016
{
yySLICE := (*[]Characteristic)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].characteristicUnion())
@@ -9576,7 +9497,7 @@ yydefault:
case 106:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Characteristic
-//line sql.y:1034
+//line sql.y:1022
{
yyLOCAL = yyDollar[3].isolationLevelUnion()
}
@@ -9584,7 +9505,7 @@ yydefault:
case 107:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Characteristic
-//line sql.y:1038
+//line sql.y:1026
{
yyLOCAL = ReadWrite
}
@@ -9592,7 +9513,7 @@ yydefault:
case 108:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Characteristic
-//line sql.y:1042
+//line sql.y:1030
{
yyLOCAL = ReadOnly
}
@@ -9600,7 +9521,7 @@ yydefault:
case 109:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsolationLevel
-//line sql.y:1048
+//line sql.y:1036
{
yyLOCAL = RepeatableRead
}
@@ -9608,7 +9529,7 @@ yydefault:
case 110:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsolationLevel
-//line sql.y:1052
+//line sql.y:1040
{
yyLOCAL = ReadCommitted
}
@@ -9616,7 +9537,7 @@ yydefault:
case 111:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsolationLevel
-//line sql.y:1056
+//line sql.y:1044
{
yyLOCAL = ReadUncommitted
}
@@ -9624,7 +9545,7 @@ yydefault:
case 112:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IsolationLevel
-//line sql.y:1060
+//line sql.y:1048
{
yyLOCAL = Serializable
}
@@ -9632,7 +9553,7 @@ yydefault:
case 113:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Scope
-//line sql.y:1066
+//line sql.y:1054
{
yyLOCAL = SessionScope
}
@@ -9640,7 +9561,7 @@ yydefault:
case 114:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Scope
-//line sql.y:1070
+//line sql.y:1058
{
yyLOCAL = SessionScope
}
@@ -9648,7 +9569,7 @@ yydefault:
case 115:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Scope
-//line sql.y:1074
+//line sql.y:1062
{
yyLOCAL = GlobalScope
}
@@ -9656,7 +9577,7 @@ yydefault:
case 116:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1080
+//line sql.y:1068
{
yyDollar[1].createTableUnion().TableSpec = yyDollar[2].tableSpecUnion()
yyDollar[1].createTableUnion().FullyParsed = true
@@ -9666,7 +9587,7 @@ yydefault:
case 117:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1086
+//line sql.y:1074
{
// Create table [name] like [name]
yyDollar[1].createTableUnion().OptLike = yyDollar[2].optLikeUnion()
@@ -9677,7 +9598,7 @@ yydefault:
case 118:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1093
+//line sql.y:1081
{
indexDef := yyDollar[1].alterTableUnion().AlterOptions[0].(*AddIndexDefinition).IndexDefinition
indexDef.Columns = yyDollar[3].indexColumnsUnion()
@@ -9690,7 +9611,7 @@ yydefault:
case 119:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1102
+//line sql.y:1090
{
yyLOCAL = &CreateView{ViewName: yyDollar[8].tableName.ToViewName(), Comments: Comments(yyDollar[2].strs).Parsed(), IsReplace: yyDollar[3].booleanUnion(), Algorithm: yyDollar[4].str, Definer: yyDollar[5].definerUnion(), Security: yyDollar[6].str, Columns: yyDollar[9].columnsUnion(), Select: yyDollar[11].selStmtUnion(), CheckOption: yyDollar[12].str}
}
@@ -9698,7 +9619,7 @@ yydefault:
case 120:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:1106
+//line sql.y:1094
{
yyDollar[1].createDatabaseUnion().FullyParsed = true
yyDollar[1].createDatabaseUnion().CreateOptions = yyDollar[2].databaseOptionsUnion()
@@ -9708,7 +9629,7 @@ yydefault:
case 121:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:1113
+//line sql.y:1101
{
yyLOCAL = false
}
@@ -9716,33 +9637,33 @@ yydefault:
case 122:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:1117
+//line sql.y:1105
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
case 123:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:1122
+//line sql.y:1110
{
yyVAL.identifierCI = NewIdentifierCI("")
}
case 124:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1126
+//line sql.y:1114
{
yyVAL.identifierCI = yyDollar[2].identifierCI
}
case 125:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1132
+//line sql.y:1120
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
case 126:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []VindexParam
-//line sql.y:1137
+//line sql.y:1125
{
var v []VindexParam
yyLOCAL = v
@@ -9751,7 +9672,7 @@ yydefault:
case 127:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []VindexParam
-//line sql.y:1142
+//line sql.y:1130
{
yyLOCAL = yyDollar[2].vindexParamsUnion()
}
@@ -9759,7 +9680,7 @@ yydefault:
case 128:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []VindexParam
-//line sql.y:1148
+//line sql.y:1136
{
yyLOCAL = make([]VindexParam, 0, 4)
yyLOCAL = append(yyLOCAL, yyDollar[1].vindexParam)
@@ -9767,21 +9688,21 @@ yydefault:
yyVAL.union = yyLOCAL
case 129:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1153
+//line sql.y:1141
{
yySLICE := (*[]VindexParam)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].vindexParam)
}
case 130:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1159
+//line sql.y:1147
{
yyVAL.vindexParam = VindexParam{Key: yyDollar[1].identifierCI, Val: yyDollar[3].str}
}
case 131:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*JSONObjectParam
-//line sql.y:1164
+//line sql.y:1152
{
yyLOCAL = nil
}
@@ -9789,7 +9710,7 @@ yydefault:
case 132:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*JSONObjectParam
-//line sql.y:1168
+//line sql.y:1156
{
yyLOCAL = yyDollar[1].jsonObjectParamsUnion()
}
@@ -9797,28 +9718,28 @@ yydefault:
case 133:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*JSONObjectParam
-//line sql.y:1174
+//line sql.y:1162
{
yyLOCAL = []*JSONObjectParam{yyDollar[1].jsonObjectParam}
}
yyVAL.union = yyLOCAL
case 134:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1178
+//line sql.y:1166
{
yySLICE := (*[]*JSONObjectParam)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].jsonObjectParam)
}
case 135:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1184
+//line sql.y:1172
{
yyVAL.jsonObjectParam = &JSONObjectParam{Key: yyDollar[1].exprUnion(), Value: yyDollar[3].exprUnion()}
}
case 136:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *CreateTable
-//line sql.y:1190
+//line sql.y:1178
{
yyLOCAL = &CreateTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[6].tableName, IfNotExists: yyDollar[5].booleanUnion(), Temp: yyDollar[3].booleanUnion()}
setDDL(yylex, yyLOCAL)
@@ -9827,7 +9748,7 @@ yydefault:
case 137:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1197
+//line sql.y:1185
{
yyLOCAL = &AlterTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[4].tableName}
setDDL(yylex, yyLOCAL)
@@ -9836,7 +9757,7 @@ yydefault:
case 138:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1204
+//line sql.y:1192
{
yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].identifierCI, Type: string(yyDollar[3].str)}, Options: yyDollar[5].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
@@ -9845,7 +9766,7 @@ yydefault:
case 139:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1209
+//line sql.y:1197
{
yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Fulltext: true}, Options: yyDollar[6].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
@@ -9854,7 +9775,7 @@ yydefault:
case 140:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1214
+//line sql.y:1202
{
yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Spatial: true}, Options: yyDollar[6].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
@@ -9863,7 +9784,7 @@ yydefault:
case 141:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *AlterTable
-//line sql.y:1219
+//line sql.y:1207
{
yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Unique: true}, Options: yyDollar[6].indexOptionsUnion()}}}}
setDDL(yylex, yyLOCAL)
@@ -9872,7 +9793,7 @@ yydefault:
case 142:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *CreateDatabase
-//line sql.y:1226
+//line sql.y:1214
{
yyLOCAL = &CreateDatabase{Comments: Comments(yyDollar[4].strs).Parsed(), DBName: yyDollar[6].identifierCS, IfNotExists: yyDollar[5].booleanUnion()}
setDDL(yylex, yyLOCAL)
@@ -9881,7 +9802,7 @@ yydefault:
case 143:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *AlterDatabase
-//line sql.y:1233
+//line sql.y:1221
{
yyLOCAL = &AlterDatabase{}
setDDL(yylex, yyLOCAL)
@@ -9890,7 +9811,7 @@ yydefault:
case 146:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *TableSpec
-//line sql.y:1244
+//line sql.y:1232
{
yyLOCAL = yyDollar[2].tableSpecUnion()
yyLOCAL.Options = yyDollar[4].tableOptionsUnion()
@@ -9900,7 +9821,7 @@ yydefault:
case 147:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1251
+//line sql.y:1239
{
yyLOCAL = nil
}
@@ -9908,7 +9829,7 @@ yydefault:
case 148:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1255
+//line sql.y:1243
{
yyLOCAL = yyDollar[1].databaseOptionsUnion()
}
@@ -9916,7 +9837,7 @@ yydefault:
case 149:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1261
+//line sql.y:1249
{
yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption}
}
@@ -9924,7 +9845,7 @@ yydefault:
case 150:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1265
+//line sql.y:1253
{
yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption}
}
@@ -9932,28 +9853,28 @@ yydefault:
case 151:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []DatabaseOption
-//line sql.y:1269
+//line sql.y:1257
{
yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption}
}
yyVAL.union = yyLOCAL
case 152:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1273
+//line sql.y:1261
{
yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].databaseOption)
}
case 153:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1277
+//line sql.y:1265
{
yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].databaseOption)
}
case 154:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1281
+//line sql.y:1269
{
yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].databaseOption)
@@ -9961,7 +9882,7 @@ yydefault:
case 155:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:1287
+//line sql.y:1275
{
yyLOCAL = false
}
@@ -9969,51 +9890,51 @@ yydefault:
case 156:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:1291
+//line sql.y:1279
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
case 157:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1297
+//line sql.y:1285
{
yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
case 158:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1301
+//line sql.y:1289
{
yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
case 159:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1307
+//line sql.y:1295
{
yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
case 160:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1311
+//line sql.y:1299
{
yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
case 161:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1317
+//line sql.y:1305
{
yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
case 162:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1321
+//line sql.y:1309
{
yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()}
}
case 163:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *OptLike
-//line sql.y:1327
+//line sql.y:1315
{
yyLOCAL = &OptLike{LikeTable: yyDollar[2].tableName}
}
@@ -10021,7 +9942,7 @@ yydefault:
case 164:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *OptLike
-//line sql.y:1331
+//line sql.y:1319
{
yyLOCAL = &OptLike{LikeTable: yyDollar[3].tableName}
}
@@ -10029,14 +9950,14 @@ yydefault:
case 165:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*ColumnDefinition
-//line sql.y:1337
+//line sql.y:1325
{
yyLOCAL = []*ColumnDefinition{yyDollar[1].columnDefinitionUnion()}
}
yyVAL.union = yyLOCAL
case 166:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1341
+//line sql.y:1329
{
yySLICE := (*[]*ColumnDefinition)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].columnDefinitionUnion())
@@ -10044,7 +9965,7 @@ yydefault:
case 167:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *TableSpec
-//line sql.y:1347
+//line sql.y:1335
{
yyLOCAL = &TableSpec{}
yyLOCAL.AddColumn(yyDollar[1].columnDefinitionUnion())
@@ -10053,7 +9974,7 @@ yydefault:
case 168:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *TableSpec
-//line sql.y:1352
+//line sql.y:1340
{
yyLOCAL = &TableSpec{}
yyLOCAL.AddConstraint(yyDollar[1].constraintDefinitionUnion())
@@ -10061,39 +9982,39 @@ yydefault:
yyVAL.union = yyLOCAL
case 169:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1357
+//line sql.y:1345
{
yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion())
}
case 170:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1361
+//line sql.y:1349
{
yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion())
yyVAL.tableSpecUnion().AddConstraint(yyDollar[4].constraintDefinitionUnion())
}
case 171:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1366
+//line sql.y:1354
{
yyVAL.tableSpecUnion().AddIndex(yyDollar[3].indexDefinitionUnion())
}
case 172:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1370
+//line sql.y:1358
{
yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion())
}
case 173:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1374
+//line sql.y:1362
{
yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion())
}
case 174:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ColumnDefinition
-//line sql.y:1385
+//line sql.y:1373
{
yyDollar[2].columnType.Options = yyDollar[4].columnTypeOptionsUnion()
if yyDollar[2].columnType.Options.Collate == "" {
@@ -10106,7 +10027,7 @@ yydefault:
case 175:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL *ColumnDefinition
-//line sql.y:1394
+//line sql.y:1382
{
yyDollar[2].columnType.Options = yyDollar[9].columnTypeOptionsUnion()
yyDollar[2].columnType.Options.As = yyDollar[7].exprUnion()
@@ -10117,20 +10038,20 @@ yydefault:
yyVAL.union = yyLOCAL
case 176:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:1403
+//line sql.y:1391
{
yyVAL.str = ""
}
case 177:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1407
+//line sql.y:1395
{
yyVAL.str = ""
}
case 178:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1416
+//line sql.y:1404
{
yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: colKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil}
}
@@ -10138,7 +10059,7 @@ yydefault:
case 179:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1420
+//line sql.y:1408
{
val := true
yyDollar[1].columnTypeOptionsUnion().Null = &val
@@ -10148,7 +10069,7 @@ yydefault:
case 180:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1426
+//line sql.y:1414
{
val := false
yyDollar[1].columnTypeOptionsUnion().Null = &val
@@ -10158,7 +10079,7 @@ yydefault:
case 181:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1432
+//line sql.y:1420
{
yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[4].exprUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10167,7 +10088,7 @@ yydefault:
case 182:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1437
+//line sql.y:1425
{
yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[3].exprUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10176,7 +10097,7 @@ yydefault:
case 183:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1442
+//line sql.y:1430
{
yyDollar[1].columnTypeOptionsUnion().OnUpdate = yyDollar[4].exprUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10185,7 +10106,7 @@ yydefault:
case 184:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1447
+//line sql.y:1435
{
yyDollar[1].columnTypeOptionsUnion().Autoincrement = true
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10194,7 +10115,7 @@ yydefault:
case 185:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1452
+//line sql.y:1440
{
yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str)
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10203,7 +10124,7 @@ yydefault:
case 186:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1457
+//line sql.y:1445
{
yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10211,14 +10132,14 @@ yydefault:
yyVAL.union = yyLOCAL
case 187:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1462
+//line sql.y:1450
{
yyDollar[1].columnTypeOptionsUnion().Collate = encodeSQLString(yyDollar[3].str)
}
case 188:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1466
+//line sql.y:1454
{
yyDollar[1].columnTypeOptionsUnion().Collate = string(yyDollar[3].identifierCI.String())
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10226,14 +10147,14 @@ yydefault:
yyVAL.union = yyLOCAL
case 189:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1471
+//line sql.y:1459
{
yyDollar[1].columnTypeOptionsUnion().Format = yyDollar[3].columnFormatUnion()
}
case 190:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1475
+//line sql.y:1463
{
yyDollar[1].columnTypeOptionsUnion().SRID = NewIntLiteral(yyDollar[3].str)
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10242,7 +10163,7 @@ yydefault:
case 191:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1480
+//line sql.y:1468
{
val := false
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
@@ -10252,7 +10173,7 @@ yydefault:
case 192:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1486
+//line sql.y:1474
{
val := true
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
@@ -10261,20 +10182,20 @@ yydefault:
yyVAL.union = yyLOCAL
case 193:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1492
+//line sql.y:1480
{
yyDollar[1].columnTypeOptionsUnion().EngineAttribute = NewStrLiteral(yyDollar[4].str)
}
case 194:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:1496
+//line sql.y:1484
{
yyDollar[1].columnTypeOptionsUnion().SecondaryEngineAttribute = NewStrLiteral(yyDollar[4].str)
}
case 195:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnFormat
-//line sql.y:1502
+//line sql.y:1490
{
yyLOCAL = FixedFormat
}
@@ -10282,7 +10203,7 @@ yydefault:
case 196:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnFormat
-//line sql.y:1506
+//line sql.y:1494
{
yyLOCAL = DynamicFormat
}
@@ -10290,7 +10211,7 @@ yydefault:
case 197:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnFormat
-//line sql.y:1510
+//line sql.y:1498
{
yyLOCAL = DefaultFormat
}
@@ -10298,7 +10219,7 @@ yydefault:
case 198:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnStorage
-//line sql.y:1516
+//line sql.y:1504
{
yyLOCAL = VirtualStorage
}
@@ -10306,7 +10227,7 @@ yydefault:
case 199:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnStorage
-//line sql.y:1520
+//line sql.y:1508
{
yyLOCAL = StoredStorage
}
@@ -10314,7 +10235,7 @@ yydefault:
case 200:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1525
+//line sql.y:1513
{
yyLOCAL = &ColumnTypeOptions{}
}
@@ -10322,7 +10243,7 @@ yydefault:
case 201:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1529
+//line sql.y:1517
{
yyDollar[1].columnTypeOptionsUnion().Storage = yyDollar[2].columnStorageUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10331,7 +10252,7 @@ yydefault:
case 202:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1534
+//line sql.y:1522
{
val := true
yyDollar[1].columnTypeOptionsUnion().Null = &val
@@ -10341,7 +10262,7 @@ yydefault:
case 203:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1540
+//line sql.y:1528
{
val := false
yyDollar[1].columnTypeOptionsUnion().Null = &val
@@ -10351,7 +10272,7 @@ yydefault:
case 204:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1546
+//line sql.y:1534
{
yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str)
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10360,7 +10281,7 @@ yydefault:
case 205:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1551
+//line sql.y:1539
{
yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion()
yyLOCAL = yyDollar[1].columnTypeOptionsUnion()
@@ -10369,7 +10290,7 @@ yydefault:
case 206:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1556
+//line sql.y:1544
{
val := false
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
@@ -10379,7 +10300,7 @@ yydefault:
case 207:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColumnTypeOptions
-//line sql.y:1562
+//line sql.y:1550
{
val := true
yyDollar[1].columnTypeOptionsUnion().Invisible = &val
@@ -10389,7 +10310,7 @@ yydefault:
case 208:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1570
+//line sql.y:1558
{
yyLOCAL = yyDollar[1].exprUnion()
}
@@ -10397,7 +10318,7 @@ yydefault:
case 210:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1577
+//line sql.y:1565
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_timestamp"), Fsp: yyDollar[2].exprUnion()}
}
@@ -10405,7 +10326,7 @@ yydefault:
case 211:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1581
+//line sql.y:1569
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtime"), Fsp: yyDollar[2].exprUnion()}
}
@@ -10413,7 +10334,7 @@ yydefault:
case 212:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1585
+//line sql.y:1573
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtimestamp"), Fsp: yyDollar[2].exprUnion()}
}
@@ -10421,7 +10342,7 @@ yydefault:
case 213:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1589
+//line sql.y:1577
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_timestamp"), Fsp: yyDollar[2].exprUnion()}
}
@@ -10429,7 +10350,7 @@ yydefault:
case 214:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1593
+//line sql.y:1581
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("now"), Fsp: yyDollar[2].exprUnion()}
}
@@ -10437,7 +10358,7 @@ yydefault:
case 217:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1603
+//line sql.y:1591
{
yyLOCAL = &NullVal{}
}
@@ -10445,7 +10366,7 @@ yydefault:
case 219:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1610
+//line sql.y:1598
{
yyLOCAL = yyDollar[2].exprUnion()
}
@@ -10453,7 +10374,7 @@ yydefault:
case 220:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1614
+//line sql.y:1602
{
yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()}
}
@@ -10461,7 +10382,7 @@ yydefault:
case 221:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1620
+//line sql.y:1608
{
yyLOCAL = yyDollar[1].exprUnion()
}
@@ -10469,7 +10390,7 @@ yydefault:
case 222:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1624
+//line sql.y:1612
{
yyLOCAL = yyDollar[1].exprUnion()
}
@@ -10477,7 +10398,7 @@ yydefault:
case 223:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1628
+//line sql.y:1616
{
yyLOCAL = yyDollar[1].boolValUnion()
}
@@ -10485,7 +10406,7 @@ yydefault:
case 224:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1632
+//line sql.y:1620
{
yyLOCAL = NewHexLiteral(yyDollar[1].str)
}
@@ -10493,7 +10414,7 @@ yydefault:
case 225:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1636
+//line sql.y:1624
{
yyLOCAL = NewHexNumLiteral(yyDollar[1].str)
}
@@ -10501,7 +10422,7 @@ yydefault:
case 226:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1640
+//line sql.y:1628
{
yyLOCAL = NewBitLiteral(yyDollar[1].str[2:])
}
@@ -10509,7 +10430,7 @@ yydefault:
case 227:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1644
+//line sql.y:1632
{
yyLOCAL = NewBitLiteral(yyDollar[1].str)
}
@@ -10517,7 +10438,7 @@ yydefault:
case 228:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1648
+//line sql.y:1636
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
@@ -10526,7 +10447,7 @@ yydefault:
case 229:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1653
+//line sql.y:1641
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str)}
}
@@ -10534,7 +10455,7 @@ yydefault:
case 230:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1657
+//line sql.y:1645
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexNumLiteral(yyDollar[2].str)}
}
@@ -10542,7 +10463,7 @@ yydefault:
case 231:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1661
+//line sql.y:1649
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str[2:])}
}
@@ -10550,7 +10471,7 @@ yydefault:
case 232:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1665
+//line sql.y:1653
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexLiteral(yyDollar[2].str)}
}
@@ -10558,7 +10479,7 @@ yydefault:
case 233:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1669
+//line sql.y:1657
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: yyDollar[2].exprUnion()}
}
@@ -10566,7 +10487,7 @@ yydefault:
case 234:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1673
+//line sql.y:1661
{
bindVariable(yylex, yyDollar[2].str[1:])
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewArgument(yyDollar[2].str[1:])}
@@ -10575,7 +10496,7 @@ yydefault:
case 235:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1678
+//line sql.y:1666
{
yyLOCAL = NewDateLiteral(yyDollar[2].str)
}
@@ -10583,7 +10504,7 @@ yydefault:
case 236:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1682
+//line sql.y:1670
{
yyLOCAL = NewTimeLiteral(yyDollar[2].str)
}
@@ -10591,267 +10512,267 @@ yydefault:
case 237:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1686
+//line sql.y:1674
{
yyLOCAL = NewTimestampLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
case 238:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1692
+//line sql.y:1680
{
yyVAL.str = Armscii8Str
}
case 239:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1696
+//line sql.y:1684
{
yyVAL.str = ASCIIStr
}
case 240:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1700
+//line sql.y:1688
{
yyVAL.str = Big5Str
}
case 241:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1704
+//line sql.y:1692
{
yyVAL.str = UBinaryStr
}
case 242:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1708
+//line sql.y:1696
{
yyVAL.str = Cp1250Str
}
case 243:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1712
+//line sql.y:1700
{
yyVAL.str = Cp1251Str
}
case 244:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1716
+//line sql.y:1704
{
yyVAL.str = Cp1256Str
}
case 245:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1720
+//line sql.y:1708
{
yyVAL.str = Cp1257Str
}
case 246:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1724
+//line sql.y:1712
{
yyVAL.str = Cp850Str
}
case 247:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1728
+//line sql.y:1716
{
yyVAL.str = Cp852Str
}
case 248:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1732
+//line sql.y:1720
{
yyVAL.str = Cp866Str
}
case 249:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1736
+//line sql.y:1724
{
yyVAL.str = Cp932Str
}
case 250:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1740
+//line sql.y:1728
{
yyVAL.str = Dec8Str
}
case 251:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1744
+//line sql.y:1732
{
yyVAL.str = EucjpmsStr
}
case 252:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1748
+//line sql.y:1736
{
yyVAL.str = EuckrStr
}
case 253:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1752
+//line sql.y:1740
{
yyVAL.str = Gb18030Str
}
case 254:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1756
+//line sql.y:1744
{
yyVAL.str = Gb2312Str
}
case 255:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1760
+//line sql.y:1748
{
yyVAL.str = GbkStr
}
case 256:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1764
+//line sql.y:1752
{
yyVAL.str = Geostd8Str
}
case 257:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1768
+//line sql.y:1756
{
yyVAL.str = GreekStr
}
case 258:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1772
+//line sql.y:1760
{
yyVAL.str = HebrewStr
}
case 259:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1776
+//line sql.y:1764
{
yyVAL.str = Hp8Str
}
case 260:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1780
+//line sql.y:1768
{
yyVAL.str = Keybcs2Str
}
case 261:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1784
+//line sql.y:1772
{
yyVAL.str = Koi8rStr
}
case 262:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1788
+//line sql.y:1776
{
yyVAL.str = Koi8uStr
}
case 263:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1792
+//line sql.y:1780
{
yyVAL.str = Latin1Str
}
case 264:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1796
+//line sql.y:1784
{
yyVAL.str = Latin2Str
}
case 265:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1800
+//line sql.y:1788
{
yyVAL.str = Latin5Str
}
case 266:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1804
+//line sql.y:1792
{
yyVAL.str = Latin7Str
}
case 267:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1808
+//line sql.y:1796
{
yyVAL.str = MacceStr
}
case 268:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1812
+//line sql.y:1800
{
yyVAL.str = MacromanStr
}
case 269:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1816
+//line sql.y:1804
{
yyVAL.str = SjisStr
}
case 270:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1820
+//line sql.y:1808
{
yyVAL.str = Swe7Str
}
case 271:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1824
+//line sql.y:1812
{
yyVAL.str = Tis620Str
}
case 272:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1828
+//line sql.y:1816
{
yyVAL.str = Ucs2Str
}
case 273:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1832
+//line sql.y:1820
{
yyVAL.str = UjisStr
}
case 274:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1836
+//line sql.y:1824
{
yyVAL.str = Utf16Str
}
case 275:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1840
+//line sql.y:1828
{
yyVAL.str = Utf16leStr
}
case 276:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1844
+//line sql.y:1832
{
yyVAL.str = Utf32Str
}
case 277:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1848
+//line sql.y:1836
{
yyVAL.str = Utf8Str
}
case 278:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1852
+//line sql.y:1840
{
yyVAL.str = Utf8mb4Str
}
case 279:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1856
+//line sql.y:1844
{
yyVAL.str = Utf8Str
}
case 282:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1866
+//line sql.y:1854
{
yyLOCAL = NewIntLiteral(yyDollar[1].str)
}
@@ -10859,7 +10780,7 @@ yydefault:
case 283:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1870
+//line sql.y:1858
{
yyLOCAL = NewFloatLiteral(yyDollar[1].str)
}
@@ -10867,7 +10788,7 @@ yydefault:
case 284:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1874
+//line sql.y:1862
{
yyLOCAL = NewDecimalLiteral(yyDollar[1].str)
}
@@ -10875,7 +10796,7 @@ yydefault:
case 285:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1880
+//line sql.y:1868
{
yyLOCAL = NewStrLiteral(yyDollar[1].str)
}
@@ -10883,7 +10804,7 @@ yydefault:
case 286:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1884
+//line sql.y:1872
{
yyLOCAL = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral(yyDollar[1].str)}
}
@@ -10891,7 +10812,7 @@ yydefault:
case 287:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1888
+//line sql.y:1876
{
yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewStrLiteral(yyDollar[2].str)}
}
@@ -10899,7 +10820,7 @@ yydefault:
case 288:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1894
+//line sql.y:1882
{
yyLOCAL = yyDollar[1].exprUnion()
}
@@ -10907,7 +10828,7 @@ yydefault:
case 289:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:1898
+//line sql.y:1886
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
@@ -10916,7 +10837,7 @@ yydefault:
case 290:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1905
+//line sql.y:1893
{
yyLOCAL = colKeyPrimary
}
@@ -10924,7 +10845,7 @@ yydefault:
case 291:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1909
+//line sql.y:1897
{
yyLOCAL = colKeyUnique
}
@@ -10932,7 +10853,7 @@ yydefault:
case 292:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1913
+//line sql.y:1901
{
yyLOCAL = colKeyUniqueKey
}
@@ -10940,14 +10861,14 @@ yydefault:
case 293:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColumnKeyOption
-//line sql.y:1917
+//line sql.y:1905
{
yyLOCAL = colKey
}
yyVAL.union = yyLOCAL
case 294:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:1923
+//line sql.y:1911
{
yyVAL.columnType = yyDollar[1].columnType
yyVAL.columnType.Unsigned = yyDollar[2].booleanUnion()
@@ -10955,74 +10876,74 @@ yydefault:
}
case 298:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1934
+//line sql.y:1922
{
yyVAL.columnType = yyDollar[1].columnType
yyVAL.columnType.Length = yyDollar[2].literalUnion()
}
case 299:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1939
+//line sql.y:1927
{
yyVAL.columnType = yyDollar[1].columnType
}
case 300:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1945
+//line sql.y:1933
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 301:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1949
+//line sql.y:1937
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 302:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1953
+//line sql.y:1941
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 303:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1957
+//line sql.y:1945
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 304:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1961
+//line sql.y:1949
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 305:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1965
+//line sql.y:1953
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 306:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1969
+//line sql.y:1957
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 307:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1973
+//line sql.y:1961
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 308:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:1977
+//line sql.y:1965
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 309:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1983
+//line sql.y:1971
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
@@ -11030,7 +10951,7 @@ yydefault:
}
case 310:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1989
+//line sql.y:1977
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
@@ -11038,7 +10959,7 @@ yydefault:
}
case 311:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:1995
+//line sql.y:1983
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
@@ -11046,7 +10967,7 @@ yydefault:
}
case 312:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2001
+//line sql.y:1989
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
@@ -11054,7 +10975,7 @@ yydefault:
}
case 313:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2007
+//line sql.y:1995
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
@@ -11062,43 +10983,43 @@ yydefault:
}
case 314:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2015
+//line sql.y:2003
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 315:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2019
+//line sql.y:2007
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
case 316:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2023
+//line sql.y:2011
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
case 317:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2027
+//line sql.y:2015
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
case 318:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2031
+//line sql.y:2019
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
case 319:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2037
+//line sql.y:2025
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
}
case 320:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2041
+//line sql.y:2029
{
// CHAR BYTE is an alias for binary. See also:
// https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html
@@ -11106,153 +11027,153 @@ yydefault:
}
case 321:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2047
+//line sql.y:2035
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
}
case 322:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2051
+//line sql.y:2039
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
case 323:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2055
+//line sql.y:2043
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
case 324:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2059
+//line sql.y:2047
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
case 325:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2063
+//line sql.y:2051
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
case 326:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2067
+//line sql.y:2055
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
case 327:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2071
+//line sql.y:2059
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset}
}
case 328:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2075
+//line sql.y:2063
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 329:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2079
+//line sql.y:2067
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 330:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2083
+//line sql.y:2071
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 331:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2087
+//line sql.y:2075
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 332:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2091
+//line sql.y:2079
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 333:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2095
+//line sql.y:2083
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset}
}
case 334:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2100
+//line sql.y:2088
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset}
}
case 335:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2106
+//line sql.y:2094
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 336:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2110
+//line sql.y:2098
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 337:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2114
+//line sql.y:2102
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 338:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2118
+//line sql.y:2106
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 339:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2122
+//line sql.y:2110
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 340:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2126
+//line sql.y:2114
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 341:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2130
+//line sql.y:2118
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 342:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2134
+//line sql.y:2122
{
yyVAL.columnType = ColumnType{Type: string(yyDollar[1].str)}
}
case 343:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2140
+//line sql.y:2128
{
yyVAL.strs = make([]string, 0, 4)
yyVAL.strs = append(yyVAL.strs, encodeSQLString(yyDollar[1].str))
}
case 344:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2145
+//line sql.y:2133
{
yyVAL.strs = append(yyDollar[1].strs, encodeSQLString(yyDollar[3].str))
}
case 345:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2150
+//line sql.y:2138
{
yyLOCAL = nil
}
@@ -11260,20 +11181,20 @@ yydefault:
case 346:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2154
+//line sql.y:2142
{
yyLOCAL = NewIntLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
case 347:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2159
+//line sql.y:2147
{
yyVAL.LengthScaleOption = LengthScaleOption{}
}
case 348:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2163
+//line sql.y:2151
{
yyVAL.LengthScaleOption = LengthScaleOption{
Length: NewIntLiteral(yyDollar[2].str),
@@ -11282,13 +11203,13 @@ yydefault:
}
case 349:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2171
+//line sql.y:2159
{
yyVAL.LengthScaleOption = LengthScaleOption{}
}
case 350:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2175
+//line sql.y:2163
{
yyVAL.LengthScaleOption = LengthScaleOption{
Length: NewIntLiteral(yyDollar[2].str),
@@ -11296,7 +11217,7 @@ yydefault:
}
case 351:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2181
+//line sql.y:2169
{
yyVAL.LengthScaleOption = LengthScaleOption{
Length: NewIntLiteral(yyDollar[2].str),
@@ -11306,7 +11227,7 @@ yydefault:
case 352:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2189
+//line sql.y:2177
{
yyLOCAL = false
}
@@ -11314,7 +11235,7 @@ yydefault:
case 353:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2193
+//line sql.y:2181
{
yyLOCAL = true
}
@@ -11322,7 +11243,7 @@ yydefault:
case 354:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2197
+//line sql.y:2185
{
yyLOCAL = false
}
@@ -11330,7 +11251,7 @@ yydefault:
case 355:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2202
+//line sql.y:2190
{
yyLOCAL = false
}
@@ -11338,66 +11259,66 @@ yydefault:
case 356:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2206
+//line sql.y:2194
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
case 357:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2211
+//line sql.y:2199
{
yyVAL.columnCharset = ColumnCharset{}
}
case 358:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2215
+//line sql.y:2203
{
yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].identifierCI.String()), Binary: yyDollar[3].booleanUnion()}
}
case 359:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2219
+//line sql.y:2207
{
yyVAL.columnCharset = ColumnCharset{Name: encodeSQLString(yyDollar[2].str), Binary: yyDollar[3].booleanUnion()}
}
case 360:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2223
+//line sql.y:2211
{
yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].str)}
}
case 361:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2227
+//line sql.y:2215
{
// ASCII: Shorthand for CHARACTER SET latin1.
yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: yyDollar[2].booleanUnion()}
}
case 362:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2232
+//line sql.y:2220
{
// UNICODE: Shorthand for CHARACTER SET ucs2.
yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: yyDollar[2].booleanUnion()}
}
case 363:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2237
+//line sql.y:2225
{
// BINARY: Shorthand for default CHARACTER SET but with binary collation
yyVAL.columnCharset = ColumnCharset{Name: "", Binary: true}
}
case 364:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2242
+//line sql.y:2230
{
// BINARY ASCII: Shorthand for CHARACTER SET latin1 with binary collation
yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: true}
}
case 365:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2247
+//line sql.y:2235
{
// BINARY UNICODE: Shorthand for CHARACTER SET ucs2 with binary collation
yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: true}
@@ -11405,7 +11326,7 @@ yydefault:
case 366:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2253
+//line sql.y:2241
{
yyLOCAL = false
}
@@ -11413,33 +11334,33 @@ yydefault:
case 367:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2257
+//line sql.y:2245
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
case 368:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2262
+//line sql.y:2250
{
yyVAL.str = ""
}
case 369:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2266
+//line sql.y:2254
{
yyVAL.str = string(yyDollar[2].identifierCI.String())
}
case 370:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2270
+//line sql.y:2258
{
yyVAL.str = encodeSQLString(yyDollar[2].str)
}
case 371:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *IndexDefinition
-//line sql.y:2276
+//line sql.y:2264
{
yyLOCAL = &IndexDefinition{Info: yyDollar[1].indexInfoUnion(), Columns: yyDollar[3].indexColumnsUnion(), Options: yyDollar[5].indexOptionsUnion()}
}
@@ -11447,7 +11368,7 @@ yydefault:
case 372:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:2281
+//line sql.y:2269
{
yyLOCAL = nil
}
@@ -11455,7 +11376,7 @@ yydefault:
case 373:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:2285
+//line sql.y:2273
{
yyLOCAL = yyDollar[1].indexOptionsUnion()
}
@@ -11463,14 +11384,14 @@ yydefault:
case 374:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:2291
+//line sql.y:2279
{
yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()}
}
yyVAL.union = yyLOCAL
case 375:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2295
+//line sql.y:2283
{
yySLICE := (*[]*IndexOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].indexOptionUnion())
@@ -11478,7 +11399,7 @@ yydefault:
case 376:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2301
+//line sql.y:2289
{
yyLOCAL = yyDollar[1].indexOptionUnion()
}
@@ -11486,7 +11407,7 @@ yydefault:
case 377:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2305
+//line sql.y:2293
{
// should not be string
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
@@ -11495,7 +11416,7 @@ yydefault:
case 378:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2310
+//line sql.y:2298
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[2].str)}
}
@@ -11503,7 +11424,7 @@ yydefault:
case 379:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2314
+//line sql.y:2302
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)}
}
@@ -11511,7 +11432,7 @@ yydefault:
case 380:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2318
+//line sql.y:2306
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)}
}
@@ -11519,7 +11440,7 @@ yydefault:
case 381:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2322
+//line sql.y:2310
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str) + " " + string(yyDollar[2].str), String: yyDollar[3].identifierCI.String()}
}
@@ -11527,7 +11448,7 @@ yydefault:
case 382:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2326
+//line sql.y:2314
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -11535,27 +11456,27 @@ yydefault:
case 383:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:2330
+//line sql.y:2318
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
case 384:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2336
+//line sql.y:2324
{
yyVAL.str = ""
}
case 385:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2340
+//line sql.y:2328
{
yyVAL.str = string(yyDollar[1].str)
}
case 386:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2346
+//line sql.y:2334
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI("PRIMARY"), Primary: true, Unique: true}
}
@@ -11563,7 +11484,7 @@ yydefault:
case 387:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2350
+//line sql.y:2338
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Spatial: true, Unique: false}
}
@@ -11571,7 +11492,7 @@ yydefault:
case 388:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2354
+//line sql.y:2342
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Fulltext: true, Unique: false}
}
@@ -11579,7 +11500,7 @@ yydefault:
case 389:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2358
+//line sql.y:2346
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[4].str), Unique: true}
}
@@ -11587,100 +11508,100 @@ yydefault:
case 390:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *IndexInfo
-//line sql.y:2362
+//line sql.y:2350
{
yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[2].str), Unique: false}
}
yyVAL.union = yyLOCAL
case 391:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2367
+//line sql.y:2355
{
yyVAL.str = ""
}
case 392:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2371
+//line sql.y:2359
{
yyVAL.str = yyDollar[2].str
}
case 393:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2377
+//line sql.y:2365
{
yyVAL.str = string(yyDollar[1].str)
}
case 394:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2381
+//line sql.y:2369
{
yyVAL.str = string(yyDollar[1].str)
}
case 395:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2385
+//line sql.y:2373
{
yyVAL.str = string(yyDollar[1].str)
}
case 396:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2391
+//line sql.y:2379
{
yyVAL.str = string(yyDollar[1].str)
}
case 397:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2395
+//line sql.y:2383
{
yyVAL.str = string(yyDollar[1].str)
}
case 398:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2400
+//line sql.y:2388
{
yyVAL.str = "key"
}
case 399:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2404
+//line sql.y:2392
{
yyVAL.str = yyDollar[1].str
}
case 400:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2410
+//line sql.y:2398
{
yyVAL.str = string(yyDollar[1].str)
}
case 401:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2414
+//line sql.y:2402
{
yyVAL.str = string(yyDollar[1].str)
}
case 402:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2419
+//line sql.y:2407
{
yyVAL.str = ""
}
case 403:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2423
+//line sql.y:2411
{
yyVAL.str = string(yyDollar[1].identifierCI.String())
}
case 404:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexColumn
-//line sql.y:2429
+//line sql.y:2417
{
yyLOCAL = []*IndexColumn{yyDollar[1].indexColumnUnion()}
}
yyVAL.union = yyLOCAL
case 405:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2433
+//line sql.y:2421
{
yySLICE := (*[]*IndexColumn)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].indexColumnUnion())
@@ -11688,7 +11609,7 @@ yydefault:
case 406:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *IndexColumn
-//line sql.y:2439
+//line sql.y:2427
{
yyLOCAL = &IndexColumn{Column: yyDollar[1].identifierCI, Length: yyDollar[2].literalUnion(), Direction: yyDollar[3].orderDirectionUnion()}
}
@@ -11696,7 +11617,7 @@ yydefault:
case 407:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *IndexColumn
-//line sql.y:2443
+//line sql.y:2431
{
yyLOCAL = &IndexColumn{Expression: yyDollar[2].exprUnion(), Direction: yyDollar[4].orderDirectionUnion()}
}
@@ -11704,7 +11625,7 @@ yydefault:
case 408:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2449
+//line sql.y:2437
{
yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()}
}
@@ -11712,7 +11633,7 @@ yydefault:
case 409:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2453
+//line sql.y:2441
{
yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()}
}
@@ -11720,7 +11641,7 @@ yydefault:
case 410:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2459
+//line sql.y:2447
{
yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()}
}
@@ -11728,7 +11649,7 @@ yydefault:
case 411:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConstraintDefinition
-//line sql.y:2463
+//line sql.y:2451
{
yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()}
}
@@ -11736,7 +11657,7 @@ yydefault:
case 412:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL ConstraintInfo
-//line sql.y:2469
+//line sql.y:2457
{
yyLOCAL = &ForeignKeyDefinition{IndexName: NewIdentifierCI(yyDollar[3].str), Source: yyDollar[5].columnsUnion(), ReferenceDefinition: yyDollar[7].referenceDefinitionUnion()}
}
@@ -11744,7 +11665,7 @@ yydefault:
case 413:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2475
+//line sql.y:2463
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion()}
}
@@ -11752,7 +11673,7 @@ yydefault:
case 414:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2479
+//line sql.y:2467
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion()}
}
@@ -11760,7 +11681,7 @@ yydefault:
case 415:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2483
+//line sql.y:2471
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion()}
}
@@ -11768,7 +11689,7 @@ yydefault:
case 416:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2487
+//line sql.y:2475
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion(), OnUpdate: yyDollar[8].referenceActionUnion()}
}
@@ -11776,7 +11697,7 @@ yydefault:
case 417:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2491
+//line sql.y:2479
{
yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion(), OnDelete: yyDollar[8].referenceActionUnion()}
}
@@ -11784,7 +11705,7 @@ yydefault:
case 418:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2496
+//line sql.y:2484
{
yyLOCAL = nil
}
@@ -11792,7 +11713,7 @@ yydefault:
case 419:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ReferenceDefinition
-//line sql.y:2500
+//line sql.y:2488
{
yyLOCAL = yyDollar[1].referenceDefinitionUnion()
}
@@ -11800,7 +11721,7 @@ yydefault:
case 420:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL ConstraintInfo
-//line sql.y:2506
+//line sql.y:2494
{
yyLOCAL = &CheckConstraintDefinition{Expr: yyDollar[3].exprUnion(), Enforced: yyDollar[5].booleanUnion()}
}
@@ -11808,7 +11729,7 @@ yydefault:
case 421:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2512
+//line sql.y:2500
{
yyLOCAL = yyDollar[2].matchActionUnion()
}
@@ -11816,7 +11737,7 @@ yydefault:
case 422:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2518
+//line sql.y:2506
{
yyLOCAL = Full
}
@@ -11824,7 +11745,7 @@ yydefault:
case 423:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2522
+//line sql.y:2510
{
yyLOCAL = Partial
}
@@ -11832,7 +11753,7 @@ yydefault:
case 424:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2526
+//line sql.y:2514
{
yyLOCAL = Simple
}
@@ -11840,7 +11761,7 @@ yydefault:
case 425:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2531
+//line sql.y:2519
{
yyLOCAL = DefaultMatch
}
@@ -11848,7 +11769,7 @@ yydefault:
case 426:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL MatchAction
-//line sql.y:2535
+//line sql.y:2523
{
yyLOCAL = yyDollar[1].matchActionUnion()
}
@@ -11856,7 +11777,7 @@ yydefault:
case 427:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2541
+//line sql.y:2529
{
yyLOCAL = yyDollar[3].referenceActionUnion()
}
@@ -11864,7 +11785,7 @@ yydefault:
case 428:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2547
+//line sql.y:2535
{
yyLOCAL = yyDollar[3].referenceActionUnion()
}
@@ -11872,7 +11793,7 @@ yydefault:
case 429:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2553
+//line sql.y:2541
{
yyLOCAL = Restrict
}
@@ -11880,7 +11801,7 @@ yydefault:
case 430:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2557
+//line sql.y:2545
{
yyLOCAL = Cascade
}
@@ -11888,7 +11809,7 @@ yydefault:
case 431:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2561
+//line sql.y:2549
{
yyLOCAL = NoAction
}
@@ -11896,7 +11817,7 @@ yydefault:
case 432:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2565
+//line sql.y:2553
{
yyLOCAL = SetDefault
}
@@ -11904,33 +11825,33 @@ yydefault:
case 433:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ReferenceAction
-//line sql.y:2569
+//line sql.y:2557
{
yyLOCAL = SetNull
}
yyVAL.union = yyLOCAL
case 434:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2574
+//line sql.y:2562
{
yyVAL.str = ""
}
case 435:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2578
+//line sql.y:2566
{
yyVAL.str = string(yyDollar[1].str)
}
case 436:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2582
+//line sql.y:2570
{
yyVAL.str = string(yyDollar[1].str)
}
case 437:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2588
+//line sql.y:2576
{
yyLOCAL = true
}
@@ -11938,7 +11859,7 @@ yydefault:
case 438:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:2592
+//line sql.y:2580
{
yyLOCAL = false
}
@@ -11946,7 +11867,7 @@ yydefault:
case 439:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2597
+//line sql.y:2585
{
yyLOCAL = true
}
@@ -11954,7 +11875,7 @@ yydefault:
case 440:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2601
+//line sql.y:2589
{
yyLOCAL = yyDollar[1].booleanUnion()
}
@@ -11962,7 +11883,7 @@ yydefault:
case 441:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2606
+//line sql.y:2594
{
yyLOCAL = nil
}
@@ -11970,7 +11891,7 @@ yydefault:
case 442:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2610
+//line sql.y:2598
{
yyLOCAL = yyDollar[1].tableOptionsUnion()
}
@@ -11978,21 +11899,21 @@ yydefault:
case 443:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2616
+//line sql.y:2604
{
yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()}
}
yyVAL.union = yyLOCAL
case 444:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2620
+//line sql.y:2608
{
yySLICE := (*TableOptions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableOptionUnion())
}
case 445:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2624
+//line sql.y:2612
{
yySLICE := (*TableOptions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion())
@@ -12000,14 +11921,14 @@ yydefault:
case 446:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableOptions
-//line sql.y:2630
+//line sql.y:2618
{
yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()}
}
yyVAL.union = yyLOCAL
case 447:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2634
+//line sql.y:2622
{
yySLICE := (*TableOptions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion())
@@ -12015,7 +11936,7 @@ yydefault:
case 448:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2640
+//line sql.y:2628
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12023,7 +11944,7 @@ yydefault:
case 449:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2644
+//line sql.y:2632
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12031,7 +11952,7 @@ yydefault:
case 450:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2648
+//line sql.y:2636
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12039,7 +11960,7 @@ yydefault:
case 451:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2652
+//line sql.y:2640
{
yyLOCAL = &TableOption{Name: (string(yyDollar[2].str)), String: yyDollar[4].str, CaseSensitive: true}
}
@@ -12047,7 +11968,7 @@ yydefault:
case 452:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2656
+//line sql.y:2644
{
yyLOCAL = &TableOption{Name: string(yyDollar[2].str), String: yyDollar[4].str, CaseSensitive: true}
}
@@ -12055,7 +11976,7 @@ yydefault:
case 453:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2660
+//line sql.y:2648
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12063,7 +11984,7 @@ yydefault:
case 454:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2664
+//line sql.y:2652
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12071,7 +11992,7 @@ yydefault:
case 455:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2668
+//line sql.y:2656
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12079,7 +12000,7 @@ yydefault:
case 456:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2672
+//line sql.y:2660
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12087,7 +12008,7 @@ yydefault:
case 457:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2676
+//line sql.y:2664
{
yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)}
}
@@ -12095,7 +12016,7 @@ yydefault:
case 458:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2680
+//line sql.y:2668
{
yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)}
}
@@ -12103,7 +12024,7 @@ yydefault:
case 459:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2684
+//line sql.y:2672
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12111,7 +12032,7 @@ yydefault:
case 460:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2688
+//line sql.y:2676
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12119,7 +12040,7 @@ yydefault:
case 461:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2692
+//line sql.y:2680
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: yyDollar[3].identifierCS.String(), CaseSensitive: true}
}
@@ -12127,7 +12048,7 @@ yydefault:
case 462:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2696
+//line sql.y:2684
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12135,7 +12056,7 @@ yydefault:
case 463:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2700
+//line sql.y:2688
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
@@ -12143,7 +12064,7 @@ yydefault:
case 464:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2704
+//line sql.y:2692
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12151,7 +12072,7 @@ yydefault:
case 465:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2708
+//line sql.y:2696
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12159,7 +12080,7 @@ yydefault:
case 466:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2712
+//line sql.y:2700
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12167,7 +12088,7 @@ yydefault:
case 467:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2716
+//line sql.y:2704
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12175,7 +12096,7 @@ yydefault:
case 468:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2720
+//line sql.y:2708
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
@@ -12183,7 +12104,7 @@ yydefault:
case 469:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2724
+//line sql.y:2712
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12191,7 +12112,7 @@ yydefault:
case 470:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2728
+//line sql.y:2716
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
@@ -12199,7 +12120,7 @@ yydefault:
case 471:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2732
+//line sql.y:2720
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)}
}
@@ -12207,7 +12128,7 @@ yydefault:
case 472:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2736
+//line sql.y:2724
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12215,7 +12136,7 @@ yydefault:
case 473:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2740
+//line sql.y:2728
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
@@ -12223,7 +12144,7 @@ yydefault:
case 474:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2744
+//line sql.y:2732
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12231,7 +12152,7 @@ yydefault:
case 475:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2748
+//line sql.y:2736
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)}
}
@@ -12239,7 +12160,7 @@ yydefault:
case 476:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2752
+//line sql.y:2740
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)}
}
@@ -12247,7 +12168,7 @@ yydefault:
case 477:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2756
+//line sql.y:2744
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str)}
}
@@ -12255,538 +12176,544 @@ yydefault:
case 478:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *TableOption
-//line sql.y:2760
+//line sql.y:2748
{
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Tables: yyDollar[4].tableNamesUnion()}
}
yyVAL.union = yyLOCAL
case 479:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2765
+//line sql.y:2753
{
yyVAL.str = ""
}
case 480:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2769
+//line sql.y:2757
{
yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
case 481:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2773
+//line sql.y:2761
{
yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
case 491:
+ yyDollar = yyS[yypt-3 : yypt+1]
+//line sql.y:2780
+ {
+ yyVAL.str = String(TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS})
+ }
+ case 492:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2792
+//line sql.y:2784
{
yyVAL.str = yyDollar[1].identifierCI.String()
}
- case 492:
+ case 493:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2796
+//line sql.y:2788
{
yyVAL.str = encodeSQLString(yyDollar[1].str)
}
- case 493:
+ case 494:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:2800
+//line sql.y:2792
{
yyVAL.str = string(yyDollar[1].str)
}
- case 494:
+ case 495:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2805
+//line sql.y:2797
{
yyVAL.str = ""
}
- case 496:
+ case 497:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:2811
+//line sql.y:2803
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 497:
+ case 498:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:2815
+//line sql.y:2807
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 498:
+ case 499:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:2820
+//line sql.y:2812
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 499:
+ case 500:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:2824
+//line sql.y:2816
{
yyLOCAL = yyDollar[2].colNameUnion()
}
yyVAL.union = yyLOCAL
- case 500:
+ case 501:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:2829
+//line sql.y:2821
{
yyVAL.str = ""
}
- case 501:
+ case 502:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:2833
+//line sql.y:2825
{
yyVAL.str = string(yyDollar[2].str)
}
- case 502:
+ case 503:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2838
+//line sql.y:2830
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 503:
+ case 504:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2842
+//line sql.y:2834
{
yyLOCAL = NewIntLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 504:
+ case 505:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:2846
+//line sql.y:2838
{
yyLOCAL = NewDecimalLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 505:
+ case 506:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2851
+//line sql.y:2843
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 506:
+ case 507:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2855
+//line sql.y:2847
{
yyLOCAL = yyDollar[1].alterOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 507:
+ case 508:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:2859
+//line sql.y:2851
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, &OrderByOption{Cols: yyDollar[5].columnsUnion()})
}
- case 508:
+ case 509:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2863
+//line sql.y:2855
{
yyLOCAL = yyDollar[1].alterOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 509:
+ case 510:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2867
+//line sql.y:2859
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionsUnion()...)
}
- case 510:
+ case 511:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2871
+//line sql.y:2863
{
yyLOCAL = append(append(yyDollar[1].alterOptionsUnion(), yyDollar[3].alterOptionsUnion()...), &OrderByOption{Cols: yyDollar[7].columnsUnion()})
}
yyVAL.union = yyLOCAL
- case 511:
+ case 512:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:2877
+//line sql.y:2869
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 512:
+ case 513:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2881
+//line sql.y:2873
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion())
}
- case 513:
+ case 514:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:2885
+//line sql.y:2877
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion())
}
- case 514:
+ case 515:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2891
+//line sql.y:2883
{
yyLOCAL = yyDollar[1].tableOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 515:
+ case 516:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2895
+//line sql.y:2887
{
yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 516:
+ case 517:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2899
+//line sql.y:2891
{
yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 517:
+ case 518:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2903
+//line sql.y:2895
{
yyLOCAL = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 518:
+ case 519:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2907
+//line sql.y:2899
{
yyLOCAL = &AddColumns{Columns: yyDollar[4].columnDefinitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 519:
+ case 520:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2911
+//line sql.y:2903
{
yyLOCAL = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinitionUnion()}, First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 520:
+ case 521:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2915
+//line sql.y:2907
{
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: true}
}
yyVAL.union = yyLOCAL
- case 521:
+ case 522:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2919
+//line sql.y:2911
{
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 522:
+ case 523:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2923
+//line sql.y:2915
{
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 523:
+ case 524:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2927
+//line sql.y:2919
{
val := false
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val}
}
yyVAL.union = yyLOCAL
- case 524:
+ case 525:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2932
+//line sql.y:2924
{
val := true
yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val}
}
yyVAL.union = yyLOCAL
- case 525:
+ case 526:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2937
+//line sql.y:2929
{
yyLOCAL = &AlterCheck{Name: yyDollar[3].identifierCI, Enforced: yyDollar[4].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 526:
+ case 527:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2941
+//line sql.y:2933
{
yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: false}
}
yyVAL.union = yyLOCAL
- case 527:
+ case 528:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2945
+//line sql.y:2937
{
yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: true}
}
yyVAL.union = yyLOCAL
- case 528:
+ case 529:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2949
+//line sql.y:2941
{
yyLOCAL = &ChangeColumn{OldColumn: yyDollar[3].colNameUnion(), NewColDefinition: yyDollar[4].columnDefinitionUnion(), First: yyDollar[5].booleanUnion(), After: yyDollar[6].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 529:
+ case 530:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2953
+//line sql.y:2945
{
yyLOCAL = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinitionUnion(), First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 530:
+ case 531:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2957
+//line sql.y:2949
{
yyLOCAL = &RenameColumn{OldName: yyDollar[3].colNameUnion(), NewName: yyDollar[5].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 531:
+ case 532:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2961
+//line sql.y:2953
{
yyLOCAL = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str}
}
yyVAL.union = yyLOCAL
- case 532:
+ case 533:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2965
+//line sql.y:2957
{
yyLOCAL = &KeyState{Enable: false}
}
yyVAL.union = yyLOCAL
- case 533:
+ case 534:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2969
+//line sql.y:2961
{
yyLOCAL = &KeyState{Enable: true}
}
yyVAL.union = yyLOCAL
- case 534:
+ case 535:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2973
+//line sql.y:2965
{
yyLOCAL = &TablespaceOperation{Import: false}
}
yyVAL.union = yyLOCAL
- case 535:
+ case 536:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2977
+//line sql.y:2969
{
yyLOCAL = &TablespaceOperation{Import: true}
}
yyVAL.union = yyLOCAL
- case 536:
+ case 537:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2981
+//line sql.y:2973
{
yyLOCAL = &DropColumn{Name: yyDollar[3].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 537:
+ case 538:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2985
+//line sql.y:2977
{
yyLOCAL = &DropKey{Type: NormalKeyType, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 538:
+ case 539:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2989
+//line sql.y:2981
{
yyLOCAL = &DropKey{Type: PrimaryKeyType}
}
yyVAL.union = yyLOCAL
- case 539:
+ case 540:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2993
+//line sql.y:2985
{
yyLOCAL = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].identifierCI}
}
yyVAL.union = yyLOCAL
- case 540:
+ case 541:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:2997
+//line sql.y:2989
{
yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 541:
+ case 542:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3001
+//line sql.y:2993
{
yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 542:
+ case 543:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3005
+//line sql.y:2997
{
yyLOCAL = &Force{}
}
yyVAL.union = yyLOCAL
- case 543:
+ case 544:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3009
+//line sql.y:3001
{
yyLOCAL = &RenameTableName{Table: yyDollar[3].tableName}
}
yyVAL.union = yyLOCAL
- case 544:
+ case 545:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3013
+//line sql.y:3005
{
yyLOCAL = &RenameIndex{OldName: yyDollar[3].identifierCI, NewName: yyDollar[5].identifierCI}
}
yyVAL.union = yyLOCAL
- case 545:
+ case 546:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:3019
+//line sql.y:3011
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 546:
+ case 547:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3023
+//line sql.y:3015
{
yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion())
}
- case 547:
+ case 548:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3029
+//line sql.y:3021
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 548:
+ case 549:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3033
+//line sql.y:3025
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 549:
+ case 550:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3037
+//line sql.y:3029
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 550:
+ case 551:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3041
+//line sql.y:3033
{
yyLOCAL = AlgorithmValue(string(yyDollar[3].str))
}
yyVAL.union = yyLOCAL
- case 551:
+ case 552:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3045
+//line sql.y:3037
{
yyLOCAL = &LockOption{Type: DefaultType}
}
yyVAL.union = yyLOCAL
- case 552:
+ case 553:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3049
+//line sql.y:3041
{
yyLOCAL = &LockOption{Type: NoneType}
}
yyVAL.union = yyLOCAL
- case 553:
+ case 554:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3053
+//line sql.y:3045
{
yyLOCAL = &LockOption{Type: SharedType}
}
yyVAL.union = yyLOCAL
- case 554:
+ case 555:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3057
+//line sql.y:3049
{
yyLOCAL = &LockOption{Type: ExclusiveType}
}
yyVAL.union = yyLOCAL
- case 555:
+ case 556:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3061
+//line sql.y:3053
{
yyLOCAL = &Validation{With: true}
}
yyVAL.union = yyLOCAL
- case 556:
+ case 557:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:3065
+//line sql.y:3057
{
yyLOCAL = &Validation{With: false}
}
yyVAL.union = yyLOCAL
- case 557:
+ case 558:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3071
+//line sql.y:3063
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion()
@@ -12794,10 +12721,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 558:
+ case 559:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3078
+//line sql.y:3070
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion()
@@ -12805,10 +12732,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 559:
+ case 560:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3085
+//line sql.y:3077
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion()
@@ -12816,28 +12743,28 @@ yydefault:
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 560:
+ case 561:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3092
+//line sql.y:3084
{
yyDollar[1].alterTableUnion().FullyParsed = true
yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[2].partSpecUnion()
yyLOCAL = yyDollar[1].alterTableUnion()
}
yyVAL.union = yyLOCAL
- case 561:
+ case 562:
yyDollar = yyS[yypt-11 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3098
+//line sql.y:3090
{
yyLOCAL = &AlterView{ViewName: yyDollar[7].tableName.ToViewName(), Comments: Comments(yyDollar[2].strs).Parsed(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].definerUnion(), Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str}
}
yyVAL.union = yyLOCAL
- case 562:
+ case 563:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3108
+//line sql.y:3100
{
yyDollar[1].alterDatabaseUnion().FullyParsed = true
yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS
@@ -12845,10 +12772,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterDatabaseUnion()
}
yyVAL.union = yyLOCAL
- case 563:
+ case 564:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3115
+//line sql.y:3107
{
yyDollar[1].alterDatabaseUnion().FullyParsed = true
yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS
@@ -12856,10 +12783,10 @@ yydefault:
yyLOCAL = yyDollar[1].alterDatabaseUnion()
}
yyVAL.union = yyLOCAL
- case 564:
+ case 565:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3122
+//line sql.y:3114
{
yyLOCAL = &AlterVschema{
Action: CreateVindexDDLAction,
@@ -12872,10 +12799,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 565:
+ case 566:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3134
+//line sql.y:3126
{
yyLOCAL = &AlterVschema{
Action: DropVindexDDLAction,
@@ -12886,26 +12813,26 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 566:
+ case 567:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3144
+//line sql.y:3136
{
yyLOCAL = &AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[6].tableName}
}
yyVAL.union = yyLOCAL
- case 567:
+ case 568:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3148
+//line sql.y:3140
{
yyLOCAL = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[6].tableName}
}
yyVAL.union = yyLOCAL
- case 568:
+ case 569:
yyDollar = yyS[yypt-13 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3152
+//line sql.y:3144
{
yyLOCAL = &AlterVschema{
Action: AddColVindexDDLAction,
@@ -12919,10 +12846,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 569:
+ case 570:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3165
+//line sql.y:3157
{
yyLOCAL = &AlterVschema{
Action: DropColVindexDDLAction,
@@ -12933,18 +12860,18 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 570:
+ case 571:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3175
+//line sql.y:3167
{
yyLOCAL = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[6].tableName}
}
yyVAL.union = yyLOCAL
- case 571:
+ case 572:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3179
+//line sql.y:3171
{
yyLOCAL = &AlterVschema{
Action: AddAutoIncDDLAction,
@@ -12956,10 +12883,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 572:
+ case 573:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3190
+//line sql.y:3182
{
yyLOCAL = &AlterMigration{
Type: RetryMigrationType,
@@ -12967,10 +12894,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 573:
+ case 574:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3197
+//line sql.y:3189
{
yyLOCAL = &AlterMigration{
Type: CleanupMigrationType,
@@ -12978,10 +12905,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 574:
+ case 575:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3204
+//line sql.y:3196
{
yyLOCAL = &AlterMigration{
Type: LaunchMigrationType,
@@ -12989,10 +12916,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 575:
+ case 576:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3211
+//line sql.y:3203
{
yyLOCAL = &AlterMigration{
Type: LaunchMigrationType,
@@ -13001,20 +12928,20 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 576:
+ case 577:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3219
+//line sql.y:3211
{
yyLOCAL = &AlterMigration{
Type: LaunchAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 577:
+ case 578:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3225
+//line sql.y:3217
{
yyLOCAL = &AlterMigration{
Type: CompleteMigrationType,
@@ -13022,20 +12949,20 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 578:
+ case 579:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3232
+//line sql.y:3224
{
yyLOCAL = &AlterMigration{
Type: CompleteAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 579:
+ case 580:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3238
+//line sql.y:3230
{
yyLOCAL = &AlterMigration{
Type: CancelMigrationType,
@@ -13043,20 +12970,20 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 580:
+ case 581:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3245
+//line sql.y:3237
{
yyLOCAL = &AlterMigration{
Type: CancelAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 581:
+ case 582:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3251
+//line sql.y:3243
{
yyLOCAL = &AlterMigration{
Type: ThrottleMigrationType,
@@ -13066,10 +12993,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 582:
+ case 583:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3260
+//line sql.y:3252
{
yyLOCAL = &AlterMigration{
Type: ThrottleAllMigrationType,
@@ -13078,10 +13005,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 583:
+ case 584:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3268
+//line sql.y:3260
{
yyLOCAL = &AlterMigration{
Type: UnthrottleMigrationType,
@@ -13089,28 +13016,28 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 584:
+ case 585:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3275
+//line sql.y:3267
{
yyLOCAL = &AlterMigration{
Type: UnthrottleAllMigrationType,
}
}
yyVAL.union = yyLOCAL
- case 585:
+ case 586:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3282
+//line sql.y:3274
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 586:
+ case 587:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3286
+//line sql.y:3278
{
yyDollar[3].partitionOptionUnion().Partitions = yyDollar[4].integerUnion()
yyDollar[3].partitionOptionUnion().SubPartition = yyDollar[5].subPartitionUnion()
@@ -13118,10 +13045,10 @@ yydefault:
yyLOCAL = yyDollar[3].partitionOptionUnion()
}
yyVAL.union = yyLOCAL
- case 587:
+ case 588:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3295
+//line sql.y:3287
{
yyLOCAL = &PartitionOption{
IsLinear: yyDollar[1].booleanUnion(),
@@ -13130,10 +13057,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 588:
+ case 589:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3303
+//line sql.y:3295
{
yyLOCAL = &PartitionOption{
IsLinear: yyDollar[1].booleanUnion(),
@@ -13143,10 +13070,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 589:
+ case 590:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3312
+//line sql.y:3304
{
yyLOCAL = &PartitionOption{
Type: yyDollar[1].partitionByTypeUnion(),
@@ -13154,10 +13081,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 590:
+ case 591:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *PartitionOption
-//line sql.y:3319
+//line sql.y:3311
{
yyLOCAL = &PartitionOption{
Type: yyDollar[1].partitionByTypeUnion(),
@@ -13165,18 +13092,18 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 591:
+ case 592:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *SubPartition
-//line sql.y:3327
+//line sql.y:3319
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 592:
+ case 593:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *SubPartition
-//line sql.y:3331
+//line sql.y:3323
{
yyLOCAL = &SubPartition{
IsLinear: yyDollar[3].booleanUnion(),
@@ -13186,10 +13113,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 593:
+ case 594:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL *SubPartition
-//line sql.y:3340
+//line sql.y:3332
{
yyLOCAL = &SubPartition{
IsLinear: yyDollar[3].booleanUnion(),
@@ -13200,682 +13127,682 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 594:
+ case 595:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*PartitionDefinition
-//line sql.y:3351
+//line sql.y:3343
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 595:
+ case 596:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL []*PartitionDefinition
-//line sql.y:3355
+//line sql.y:3347
{
yyLOCAL = yyDollar[2].partDefsUnion()
}
yyVAL.union = yyLOCAL
- case 596:
+ case 597:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3360
+//line sql.y:3352
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 597:
+ case 598:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3364
+//line sql.y:3356
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 598:
+ case 599:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL int
-//line sql.y:3369
+//line sql.y:3361
{
yyLOCAL = 0
}
yyVAL.union = yyLOCAL
- case 599:
+ case 600:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL int
-//line sql.y:3373
+//line sql.y:3365
{
yyLOCAL = convertStringToInt(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 600:
+ case 601:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:3379
+//line sql.y:3371
{
yyLOCAL = &JSONTableExpr{Expr: yyDollar[3].exprUnion(), Filter: yyDollar[5].exprUnion(), Columns: yyDollar[6].jtColumnListUnion(), Alias: yyDollar[8].identifierCS}
}
yyVAL.union = yyLOCAL
- case 601:
+ case 602:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL []*JtColumnDefinition
-//line sql.y:3385
+//line sql.y:3377
{
yyLOCAL = yyDollar[3].jtColumnListUnion()
}
yyVAL.union = yyLOCAL
- case 602:
+ case 603:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*JtColumnDefinition
-//line sql.y:3391
+//line sql.y:3383
{
yyLOCAL = []*JtColumnDefinition{yyDollar[1].jtColumnDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 603:
+ case 604:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3395
+//line sql.y:3387
{
yySLICE := (*[]*JtColumnDefinition)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].jtColumnDefinitionUnion())
}
- case 604:
+ case 605:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3401
+//line sql.y:3393
{
yyLOCAL = &JtColumnDefinition{JtOrdinal: &JtOrdinalColDef{Name: yyDollar[1].identifierCI}}
}
yyVAL.union = yyLOCAL
- case 605:
+ case 606:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3405
+//line sql.y:3397
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 606:
+ case 607:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3411
+//line sql.y:3403
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 607:
+ case 608:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3417
+//line sql.y:3409
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 608:
+ case 609:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3423
+//line sql.y:3415
{
yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str}
jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()}
yyLOCAL = &JtColumnDefinition{JtPath: jtPath}
}
yyVAL.union = yyLOCAL
- case 609:
+ case 610:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *JtColumnDefinition
-//line sql.y:3429
+//line sql.y:3421
{
jtNestedPath := &JtNestedPathColDef{Path: yyDollar[3].exprUnion(), Columns: yyDollar[4].jtColumnListUnion()}
yyLOCAL = &JtColumnDefinition{JtNestedPath: jtNestedPath}
}
yyVAL.union = yyLOCAL
- case 610:
+ case 611:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3435
+//line sql.y:3427
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 611:
+ case 612:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3439
+//line sql.y:3431
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 612:
+ case 613:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3443
+//line sql.y:3435
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 613:
+ case 614:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3447
+//line sql.y:3439
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 614:
+ case 615:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3453
+//line sql.y:3445
{
yyLOCAL = yyDollar[1].jtOnResponseUnion()
}
yyVAL.union = yyLOCAL
- case 615:
+ case 616:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3459
+//line sql.y:3451
{
yyLOCAL = yyDollar[1].jtOnResponseUnion()
}
yyVAL.union = yyLOCAL
- case 616:
+ case 617:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3465
+//line sql.y:3457
{
yyLOCAL = &JtOnResponse{ResponseType: ErrorJSONType}
}
yyVAL.union = yyLOCAL
- case 617:
+ case 618:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3469
+//line sql.y:3461
{
yyLOCAL = &JtOnResponse{ResponseType: NullJSONType}
}
yyVAL.union = yyLOCAL
- case 618:
+ case 619:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *JtOnResponse
-//line sql.y:3473
+//line sql.y:3465
{
yyLOCAL = &JtOnResponse{ResponseType: DefaultJSONType, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 619:
+ case 620:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL PartitionByType
-//line sql.y:3479
+//line sql.y:3471
{
yyLOCAL = RangeType
}
yyVAL.union = yyLOCAL
- case 620:
+ case 621:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL PartitionByType
-//line sql.y:3483
+//line sql.y:3475
{
yyLOCAL = ListType
}
yyVAL.union = yyLOCAL
- case 621:
+ case 622:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL int
-//line sql.y:3488
+//line sql.y:3480
{
yyLOCAL = -1
}
yyVAL.union = yyLOCAL
- case 622:
+ case 623:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL int
-//line sql.y:3492
+//line sql.y:3484
{
yyLOCAL = convertStringToInt(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 623:
+ case 624:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL int
-//line sql.y:3497
+//line sql.y:3489
{
yyLOCAL = -1
}
yyVAL.union = yyLOCAL
- case 624:
+ case 625:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL int
-//line sql.y:3501
+//line sql.y:3493
{
yyLOCAL = convertStringToInt(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 625:
+ case 626:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3507
+//line sql.y:3499
{
yyLOCAL = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDefUnion()}}
}
yyVAL.union = yyLOCAL
- case 626:
+ case 627:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3511
+//line sql.y:3503
{
yyLOCAL = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 627:
+ case 628:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3515
+//line sql.y:3507
{
yyLOCAL = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitionsUnion(), Definitions: yyDollar[6].partDefsUnion()}
}
yyVAL.union = yyLOCAL
- case 628:
+ case 629:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3519
+//line sql.y:3511
{
yyLOCAL = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 629:
+ case 630:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3523
+//line sql.y:3515
{
yyLOCAL = &PartitionSpec{Action: DiscardAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 630:
+ case 631:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3527
+//line sql.y:3519
{
yyLOCAL = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 631:
+ case 632:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3531
+//line sql.y:3523
{
yyLOCAL = &PartitionSpec{Action: ImportAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 632:
+ case 633:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3535
+//line sql.y:3527
{
yyLOCAL = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 633:
+ case 634:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3539
+//line sql.y:3531
{
yyLOCAL = &PartitionSpec{Action: TruncateAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 634:
+ case 635:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3543
+//line sql.y:3535
{
yyLOCAL = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 635:
+ case 636:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3547
+//line sql.y:3539
{
yyLOCAL = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].identifierCI}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 636:
+ case 637:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3551
+//line sql.y:3543
{
yyLOCAL = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 637:
+ case 638:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3555
+//line sql.y:3547
{
yyLOCAL = &PartitionSpec{Action: AnalyzeAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 638:
+ case 639:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3559
+//line sql.y:3551
{
yyLOCAL = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 639:
+ case 640:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3563
+//line sql.y:3555
{
yyLOCAL = &PartitionSpec{Action: CheckAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 640:
+ case 641:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3567
+//line sql.y:3559
{
yyLOCAL = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 641:
+ case 642:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3571
+//line sql.y:3563
{
yyLOCAL = &PartitionSpec{Action: OptimizeAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 642:
+ case 643:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3575
+//line sql.y:3567
{
yyLOCAL = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 643:
+ case 644:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3579
+//line sql.y:3571
{
yyLOCAL = &PartitionSpec{Action: RebuildAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 644:
+ case 645:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3583
+//line sql.y:3575
{
yyLOCAL = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 645:
+ case 646:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3587
+//line sql.y:3579
{
yyLOCAL = &PartitionSpec{Action: RepairAction, IsAll: true}
}
yyVAL.union = yyLOCAL
- case 646:
+ case 647:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionSpec
-//line sql.y:3591
+//line sql.y:3583
{
yyLOCAL = &PartitionSpec{Action: UpgradeAction}
}
yyVAL.union = yyLOCAL
- case 647:
+ case 648:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3596
+//line sql.y:3588
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 648:
+ case 649:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:3600
+//line sql.y:3592
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 649:
+ case 650:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:3604
+//line sql.y:3596
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 650:
+ case 651:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*PartitionDefinition
-//line sql.y:3610
+//line sql.y:3602
{
yyLOCAL = []*PartitionDefinition{yyDollar[1].partDefUnion()}
}
yyVAL.union = yyLOCAL
- case 651:
+ case 652:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3614
+//line sql.y:3606
{
yySLICE := (*[]*PartitionDefinition)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].partDefUnion())
}
- case 652:
+ case 653:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:3620
+//line sql.y:3612
{
yyVAL.partDefUnion().Options = yyDollar[2].partitionDefinitionOptionsUnion()
}
- case 653:
+ case 654:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3625
+//line sql.y:3617
{
yyLOCAL = &PartitionDefinitionOptions{}
}
yyVAL.union = yyLOCAL
- case 654:
+ case 655:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3629
+//line sql.y:3621
{
yyDollar[1].partitionDefinitionOptionsUnion().ValueRange = yyDollar[2].partitionValueRangeUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 655:
+ case 656:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3634
+//line sql.y:3626
{
yyDollar[1].partitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 656:
+ case 657:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3639
+//line sql.y:3631
{
yyDollar[1].partitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 657:
+ case 658:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3644
+//line sql.y:3636
{
yyDollar[1].partitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 658:
+ case 659:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3649
+//line sql.y:3641
{
yyDollar[1].partitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 659:
+ case 660:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3654
+//line sql.y:3646
{
val := yyDollar[2].integerUnion()
yyDollar[1].partitionDefinitionOptionsUnion().MaxRows = &val
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 660:
+ case 661:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3660
+//line sql.y:3652
{
val := yyDollar[2].integerUnion()
yyDollar[1].partitionDefinitionOptionsUnion().MinRows = &val
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 661:
+ case 662:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3666
+//line sql.y:3658
{
yyDollar[1].partitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 662:
+ case 663:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinitionOptions
-//line sql.y:3671
+//line sql.y:3663
{
yyDollar[1].partitionDefinitionOptionsUnion().SubPartitionDefinitions = yyDollar[2].subPartitionDefinitionsUnion()
yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 663:
+ case 664:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SubPartitionDefinitions
-//line sql.y:3677
+//line sql.y:3669
{
yyLOCAL = yyDollar[2].subPartitionDefinitionsUnion()
}
yyVAL.union = yyLOCAL
- case 664:
+ case 665:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SubPartitionDefinitions
-//line sql.y:3683
+//line sql.y:3675
{
yyLOCAL = SubPartitionDefinitions{yyDollar[1].subPartitionDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 665:
+ case 666:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3687
+//line sql.y:3679
{
yySLICE := (*SubPartitionDefinitions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].subPartitionDefinitionUnion())
}
- case 666:
+ case 667:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SubPartitionDefinition
-//line sql.y:3693
+//line sql.y:3685
{
yyLOCAL = &SubPartitionDefinition{Name: yyDollar[2].identifierCI, Options: yyDollar[3].subPartitionDefinitionOptionsUnion()}
}
yyVAL.union = yyLOCAL
- case 667:
+ case 668:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3698
+//line sql.y:3690
{
yyLOCAL = &SubPartitionDefinitionOptions{}
}
yyVAL.union = yyLOCAL
- case 668:
+ case 669:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3702
+//line sql.y:3694
{
yyDollar[1].subPartitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 669:
+ case 670:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3707
+//line sql.y:3699
{
yyDollar[1].subPartitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 670:
+ case 671:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3712
+//line sql.y:3704
{
yyDollar[1].subPartitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 671:
+ case 672:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3717
+//line sql.y:3709
{
yyDollar[1].subPartitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion()
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 672:
+ case 673:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3722
+//line sql.y:3714
{
val := yyDollar[2].integerUnion()
yyDollar[1].subPartitionDefinitionOptionsUnion().MaxRows = &val
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 673:
+ case 674:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3728
+//line sql.y:3720
{
val := yyDollar[2].integerUnion()
yyDollar[1].subPartitionDefinitionOptionsUnion().MinRows = &val
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 674:
+ case 675:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *SubPartitionDefinitionOptions
-//line sql.y:3734
+//line sql.y:3726
{
yyDollar[1].subPartitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str
yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion()
}
yyVAL.union = yyLOCAL
- case 675:
+ case 676:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionValueRange
-//line sql.y:3741
+//line sql.y:3733
{
yyLOCAL = &PartitionValueRange{
Type: LessThanType,
@@ -13883,10 +13810,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 676:
+ case 677:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionValueRange
-//line sql.y:3748
+//line sql.y:3740
{
yyLOCAL = &PartitionValueRange{
Type: LessThanType,
@@ -13894,10 +13821,10 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 677:
+ case 678:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *PartitionValueRange
-//line sql.y:3755
+//line sql.y:3747
{
yyLOCAL = &PartitionValueRange{
Type: InType,
@@ -13905,131 +13832,131 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 678:
+ case 679:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:3763
+//line sql.y:3755
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 679:
+ case 680:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:3767
+//line sql.y:3759
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 680:
+ case 681:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *PartitionEngine
-//line sql.y:3773
+//line sql.y:3765
{
yyLOCAL = &PartitionEngine{Storage: yyDollar[1].booleanUnion(), Name: yyDollar[4].identifierCS.String()}
}
yyVAL.union = yyLOCAL
- case 681:
+ case 682:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:3779
+//line sql.y:3771
{
yyLOCAL = NewStrLiteral(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 682:
+ case 683:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:3785
+//line sql.y:3777
{
yyLOCAL = NewStrLiteral(yyDollar[4].str)
}
yyVAL.union = yyLOCAL
- case 683:
+ case 684:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Literal
-//line sql.y:3791
+//line sql.y:3783
{
yyLOCAL = NewStrLiteral(yyDollar[4].str)
}
yyVAL.union = yyLOCAL
- case 684:
+ case 685:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL int
-//line sql.y:3797
+//line sql.y:3789
{
yyLOCAL = convertStringToInt(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 685:
+ case 686:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL int
-//line sql.y:3803
+//line sql.y:3795
{
yyLOCAL = convertStringToInt(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 686:
+ case 687:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3809
+//line sql.y:3801
{
yyVAL.str = yyDollar[3].identifierCS.String()
}
- case 687:
+ case 688:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *PartitionDefinition
-//line sql.y:3815
+//line sql.y:3807
{
yyLOCAL = &PartitionDefinition{Name: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 688:
+ case 689:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:3821
+//line sql.y:3813
{
yyVAL.str = ""
}
- case 689:
+ case 690:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:3825
+//line sql.y:3817
{
yyVAL.str = ""
}
- case 690:
+ case 691:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3831
+//line sql.y:3823
{
yyLOCAL = &RenameTable{TablePairs: yyDollar[3].renameTablePairsUnion()}
}
yyVAL.union = yyLOCAL
- case 691:
+ case 692:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL []*RenameTablePair
-//line sql.y:3837
+//line sql.y:3829
{
yyLOCAL = []*RenameTablePair{{FromTable: yyDollar[1].tableName, ToTable: yyDollar[3].tableName}}
}
yyVAL.union = yyLOCAL
- case 692:
+ case 693:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:3841
+//line sql.y:3833
{
yySLICE := (*[]*RenameTablePair)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, &RenameTablePair{FromTable: yyDollar[3].tableName, ToTable: yyDollar[5].tableName})
}
- case 693:
+ case 694:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3847
+//line sql.y:3839
{
yyLOCAL = &DropTable{FromTables: yyDollar[6].tableNamesUnion(), IfExists: yyDollar[5].booleanUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Temp: yyDollar[3].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 694:
+ case 695:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3851
+//line sql.y:3843
{
// Change this to an alter statement
if yyDollar[4].identifierCI.Lowered() == "primary" {
@@ -14039,1216 +13966,1216 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 695:
+ case 696:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3860
+//line sql.y:3852
{
yyLOCAL = &DropView{FromTables: yyDollar[5].tableNamesUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), IfExists: yyDollar[4].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 696:
+ case 697:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3864
+//line sql.y:3856
{
yyLOCAL = &DropDatabase{Comments: Comments(yyDollar[2].strs).Parsed(), DBName: yyDollar[5].identifierCS, IfExists: yyDollar[4].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 697:
+ case 698:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3870
+//line sql.y:3862
{
yyLOCAL = &TruncateTable{Table: yyDollar[3].tableName}
}
yyVAL.union = yyLOCAL
- case 698:
+ case 699:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3874
+//line sql.y:3866
{
yyLOCAL = &TruncateTable{Table: yyDollar[2].tableName}
}
yyVAL.union = yyLOCAL
- case 699:
+ case 700:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3880
+//line sql.y:3872
{
yyLOCAL = &OtherRead{}
}
yyVAL.union = yyLOCAL
- case 700:
+ case 701:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3886
+//line sql.y:3878
{
yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 701:
+ case 702:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3890
+//line sql.y:3882
{
yyLOCAL = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 702:
+ case 703:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3894
+//line sql.y:3886
{
yyLOCAL = &Show{&ShowBasic{Full: yyDollar[2].booleanUnion(), Command: Column, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 703:
+ case 704:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3898
+//line sql.y:3890
{
yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 704:
+ case 705:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3902
+//line sql.y:3894
{
yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 705:
+ case 706:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3906
+//line sql.y:3898
{
yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 706:
+ case 707:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3910
+//line sql.y:3902
{
yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 707:
+ case 708:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3914
+//line sql.y:3906
{
yyLOCAL = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 708:
+ case 709:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3918
+//line sql.y:3910
{
yyLOCAL = &Show{&ShowBasic{Command: Index, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 709:
+ case 710:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3922
+//line sql.y:3914
{
yyLOCAL = &Show{&ShowBasic{Command: OpenTable, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 710:
+ case 711:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3926
+//line sql.y:3918
{
yyLOCAL = &Show{&ShowBasic{Command: Privilege}}
}
yyVAL.union = yyLOCAL
- case 711:
+ case 712:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3930
+//line sql.y:3922
{
yyLOCAL = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 712:
+ case 713:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3934
+//line sql.y:3926
{
yyLOCAL = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 713:
+ case 714:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3938
+//line sql.y:3930
{
yyLOCAL = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 714:
+ case 715:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3942
+//line sql.y:3934
{
yyLOCAL = &Show{&ShowBasic{Command: VariableSession, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 715:
+ case 716:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3946
+//line sql.y:3938
{
yyLOCAL = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 716:
+ case 717:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3950
+//line sql.y:3942
{
yyLOCAL = &Show{&ShowBasic{Command: TableStatus, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 717:
+ case 718:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3954
+//line sql.y:3946
{
yyLOCAL = &Show{&ShowBasic{Command: Table, Full: yyDollar[2].booleanUnion(), DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 718:
+ case 719:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3958
+//line sql.y:3950
{
yyLOCAL = &Show{&ShowBasic{Command: Trigger, DbName: yyDollar[3].identifierCS, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 719:
+ case 720:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3962
+//line sql.y:3954
{
yyLOCAL = &Show{&ShowCreate{Command: CreateDb, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 720:
+ case 721:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3966
+//line sql.y:3958
{
yyLOCAL = &Show{&ShowCreate{Command: CreateE, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 721:
+ case 722:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3970
+//line sql.y:3962
{
yyLOCAL = &Show{&ShowCreate{Command: CreateF, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 722:
+ case 723:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3974
+//line sql.y:3966
{
yyLOCAL = &Show{&ShowCreate{Command: CreateProc, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 723:
+ case 724:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3978
+//line sql.y:3970
{
yyLOCAL = &Show{&ShowCreate{Command: CreateTbl, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 724:
+ case 725:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3982
+//line sql.y:3974
{
yyLOCAL = &Show{&ShowCreate{Command: CreateTr, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 725:
+ case 726:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3986
+//line sql.y:3978
{
yyLOCAL = &Show{&ShowCreate{Command: CreateV, Op: yyDollar[4].tableName}}
}
yyVAL.union = yyLOCAL
- case 726:
+ case 727:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3990
+//line sql.y:3982
{
yyLOCAL = &Show{&ShowBasic{Command: Engines}}
}
yyVAL.union = yyLOCAL
- case 727:
+ case 728:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3994
+//line sql.y:3986
{
yyLOCAL = &Show{&ShowBasic{Command: Plugins}}
}
yyVAL.union = yyLOCAL
- case 728:
+ case 729:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:3998
+//line sql.y:3990
{
yyLOCAL = &Show{&ShowBasic{Command: GtidExecGlobal, DbName: yyDollar[4].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 729:
+ case 730:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4002
+//line sql.y:3994
{
yyLOCAL = &Show{&ShowBasic{Command: VGtidExecGlobal, DbName: yyDollar[4].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 730:
+ case 731:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4006
+//line sql.y:3998
{
yyLOCAL = &Show{&ShowBasic{Command: VitessVariables, Filter: yyDollar[4].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 731:
+ case 732:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4010
+//line sql.y:4002
{
yyLOCAL = &Show{&ShowBasic{Command: VitessMigrations, Filter: yyDollar[4].showFilterUnion(), DbName: yyDollar[3].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 732:
+ case 733:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4014
+//line sql.y:4006
{
yyLOCAL = &ShowMigrationLogs{UUID: string(yyDollar[3].str)}
}
yyVAL.union = yyLOCAL
- case 733:
+ case 734:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4018
+//line sql.y:4010
{
yyLOCAL = &ShowThrottledApps{}
}
yyVAL.union = yyLOCAL
- case 734:
+ case 735:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4022
+//line sql.y:4014
{
yyLOCAL = &Show{&ShowBasic{Command: VitessReplicationStatus, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 735:
+ case 736:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4026
+//line sql.y:4018
{
yyLOCAL = &Show{&ShowBasic{Command: VschemaTables}}
}
yyVAL.union = yyLOCAL
- case 736:
+ case 737:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4030
+//line sql.y:4022
{
yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes}}
}
yyVAL.union = yyLOCAL
- case 737:
+ case 738:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4034
+//line sql.y:4026
{
yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes, Tbl: yyDollar[5].tableName}}
}
yyVAL.union = yyLOCAL
- case 738:
+ case 739:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4038
+//line sql.y:4030
{
yyLOCAL = &Show{&ShowBasic{Command: Warnings}}
}
yyVAL.union = yyLOCAL
- case 739:
+ case 740:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4042
+//line sql.y:4034
{
yyLOCAL = &Show{&ShowBasic{Command: VitessShards, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 740:
+ case 741:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4046
+//line sql.y:4038
{
yyLOCAL = &Show{&ShowBasic{Command: VitessTablets, Filter: yyDollar[3].showFilterUnion()}}
}
yyVAL.union = yyLOCAL
- case 741:
+ case 742:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4050
+//line sql.y:4042
{
yyLOCAL = &Show{&ShowBasic{Command: VitessTarget}}
}
yyVAL.union = yyLOCAL
- case 742:
+ case 743:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4057
+//line sql.y:4049
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].identifierCI.String())}}
}
yyVAL.union = yyLOCAL
- case 743:
+ case 744:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4061
+//line sql.y:4053
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}}
}
yyVAL.union = yyLOCAL
- case 744:
+ case 745:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4065
+//line sql.y:4057
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String()}}
}
yyVAL.union = yyLOCAL
- case 745:
+ case 746:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4069
+//line sql.y:4061
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}}
}
yyVAL.union = yyLOCAL
- case 746:
+ case 747:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4073
+//line sql.y:4065
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}}
}
yyVAL.union = yyLOCAL
- case 747:
+ case 748:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4077
+//line sql.y:4069
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}}
}
yyVAL.union = yyLOCAL
- case 748:
+ case 749:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4081
+//line sql.y:4073
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}}
}
yyVAL.union = yyLOCAL
- case 749:
+ case 750:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4085
+//line sql.y:4077
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[3].str)}}
}
yyVAL.union = yyLOCAL
- case 750:
+ case 751:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4089
+//line sql.y:4081
{
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}}
}
yyVAL.union = yyLOCAL
- case 751:
+ case 752:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4095
+//line sql.y:4087
{
yyVAL.str = ""
}
- case 752:
+ case 753:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4099
+//line sql.y:4091
{
yyVAL.str = "extended "
}
- case 753:
+ case 754:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:4105
+//line sql.y:4097
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 754:
+ case 755:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4109
+//line sql.y:4101
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 755:
+ case 756:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4115
+//line sql.y:4107
{
yyVAL.str = string(yyDollar[1].str)
}
- case 756:
+ case 757:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4119
+//line sql.y:4111
{
yyVAL.str = string(yyDollar[1].str)
}
- case 757:
+ case 758:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4125
+//line sql.y:4117
{
yyVAL.identifierCS = NewIdentifierCS("")
}
- case 758:
+ case 759:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4129
+//line sql.y:4121
{
yyVAL.identifierCS = yyDollar[2].identifierCS
}
- case 759:
+ case 760:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4133
+//line sql.y:4125
{
yyVAL.identifierCS = yyDollar[2].identifierCS
}
- case 760:
+ case 761:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4139
+//line sql.y:4131
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 761:
+ case 762:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4143
+//line sql.y:4135
{
yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 762:
+ case 763:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4147
+//line sql.y:4139
{
yyLOCAL = &ShowFilter{Filter: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 763:
+ case 764:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4153
+//line sql.y:4145
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 764:
+ case 765:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ShowFilter
-//line sql.y:4157
+//line sql.y:4149
{
yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)}
}
yyVAL.union = yyLOCAL
- case 765:
+ case 766:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4163
+//line sql.y:4155
{
yyVAL.empty = struct{}{}
}
- case 766:
+ case 767:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4167
+//line sql.y:4159
{
yyVAL.empty = struct{}{}
}
- case 767:
+ case 768:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4171
+//line sql.y:4163
{
yyVAL.empty = struct{}{}
}
- case 768:
+ case 769:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4177
+//line sql.y:4169
{
yyVAL.str = string(yyDollar[1].str)
}
- case 769:
+ case 770:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4181
+//line sql.y:4173
{
yyVAL.str = string(yyDollar[1].str)
}
- case 770:
+ case 771:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4187
+//line sql.y:4179
{
yyLOCAL = &Use{DBName: yyDollar[2].identifierCS}
}
yyVAL.union = yyLOCAL
- case 771:
+ case 772:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4191
+//line sql.y:4183
{
yyLOCAL = &Use{DBName: IdentifierCS{v: ""}}
}
yyVAL.union = yyLOCAL
- case 772:
+ case 773:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4195
+//line sql.y:4187
{
yyLOCAL = &Use{DBName: NewIdentifierCS(yyDollar[2].identifierCS.String() + "@" + string(yyDollar[3].str))}
}
yyVAL.union = yyLOCAL
- case 773:
+ case 774:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4202
+//line sql.y:4194
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 774:
+ case 775:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4206
+//line sql.y:4198
{
yyVAL.identifierCS = NewIdentifierCS("@" + string(yyDollar[1].str))
}
- case 775:
+ case 776:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4210
+//line sql.y:4202
{
yyVAL.identifierCS = NewIdentifierCS("@@" + string(yyDollar[1].str))
}
- case 776:
+ case 777:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4214
+//line sql.y:4206
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 777:
+ case 778:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4221
+//line sql.y:4213
{
yyLOCAL = &Begin{}
}
yyVAL.union = yyLOCAL
- case 778:
+ case 779:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4225
+//line sql.y:4217
{
yyLOCAL = &Begin{}
}
yyVAL.union = yyLOCAL
- case 779:
+ case 780:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4231
+//line sql.y:4223
{
yyLOCAL = &Commit{}
}
yyVAL.union = yyLOCAL
- case 780:
+ case 781:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4237
+//line sql.y:4229
{
yyLOCAL = &Rollback{}
}
yyVAL.union = yyLOCAL
- case 781:
+ case 782:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4241
+//line sql.y:4233
{
yyLOCAL = &SRollback{Name: yyDollar[5].identifierCI}
}
yyVAL.union = yyLOCAL
- case 782:
+ case 783:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4246
+//line sql.y:4238
{
yyVAL.empty = struct{}{}
}
- case 783:
+ case 784:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4248
+//line sql.y:4240
{
yyVAL.empty = struct{}{}
}
- case 784:
+ case 785:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4251
+//line sql.y:4243
{
yyVAL.empty = struct{}{}
}
- case 785:
+ case 786:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4253
+//line sql.y:4245
{
yyVAL.empty = struct{}{}
}
- case 786:
+ case 787:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4257
+//line sql.y:4249
{
yyLOCAL = &Savepoint{Name: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 787:
+ case 788:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4263
+//line sql.y:4255
{
yyLOCAL = &Release{Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 788:
+ case 789:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4268
+//line sql.y:4260
{
yyLOCAL = EmptyType
}
yyVAL.union = yyLOCAL
- case 789:
+ case 790:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4272
+//line sql.y:4264
{
yyLOCAL = JSONType
}
yyVAL.union = yyLOCAL
- case 790:
+ case 791:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4276
+//line sql.y:4268
{
yyLOCAL = TreeType
}
yyVAL.union = yyLOCAL
- case 791:
+ case 792:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4280
+//line sql.y:4272
{
yyLOCAL = VitessType
}
yyVAL.union = yyLOCAL
- case 792:
+ case 793:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4284
+//line sql.y:4276
{
yyLOCAL = VTExplainType
}
yyVAL.union = yyLOCAL
- case 793:
+ case 794:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4288
+//line sql.y:4280
{
yyLOCAL = TraditionalType
}
yyVAL.union = yyLOCAL
- case 794:
+ case 795:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ExplainType
-//line sql.y:4292
+//line sql.y:4284
{
yyLOCAL = AnalyzeType
}
yyVAL.union = yyLOCAL
- case 795:
+ case 796:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4298
+//line sql.y:4290
{
yyVAL.str = yyDollar[1].str
}
- case 796:
+ case 797:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4302
+//line sql.y:4294
{
yyVAL.str = yyDollar[1].str
}
- case 797:
+ case 798:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4306
+//line sql.y:4298
{
yyVAL.str = yyDollar[1].str
}
- case 798:
+ case 799:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4312
+//line sql.y:4304
{
yyLOCAL = yyDollar[1].selStmtUnion()
}
yyVAL.union = yyLOCAL
- case 799:
+ case 800:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4316
+//line sql.y:4308
{
yyLOCAL = yyDollar[1].statementUnion()
}
yyVAL.union = yyLOCAL
- case 800:
+ case 801:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4320
+//line sql.y:4312
{
yyLOCAL = yyDollar[1].statementUnion()
}
yyVAL.union = yyLOCAL
- case 801:
+ case 802:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4324
+//line sql.y:4316
{
yyLOCAL = yyDollar[1].statementUnion()
}
yyVAL.union = yyLOCAL
- case 802:
+ case 803:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4329
+//line sql.y:4321
{
yyVAL.str = ""
}
- case 803:
+ case 804:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4333
+//line sql.y:4325
{
yyVAL.str = yyDollar[1].identifierCI.val
}
- case 804:
+ case 805:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4337
+//line sql.y:4329
{
yyVAL.str = encodeSQLString(yyDollar[1].str)
}
- case 805:
+ case 806:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4343
+//line sql.y:4335
{
yyLOCAL = &ExplainTab{Table: yyDollar[3].tableName, Wild: yyDollar[4].str}
}
yyVAL.union = yyLOCAL
- case 806:
+ case 807:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4347
+//line sql.y:4339
{
yyLOCAL = &ExplainStmt{Type: yyDollar[3].explainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()}
}
yyVAL.union = yyLOCAL
- case 807:
+ case 808:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4353
+//line sql.y:4345
{
yyLOCAL = &OtherAdmin{}
}
yyVAL.union = yyLOCAL
- case 808:
+ case 809:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4357
+//line sql.y:4349
{
yyLOCAL = &OtherAdmin{}
}
yyVAL.union = yyLOCAL
- case 809:
+ case 810:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4363
+//line sql.y:4355
{
yyLOCAL = &LockTables{Tables: yyDollar[3].tableAndLockTypesUnion()}
}
yyVAL.union = yyLOCAL
- case 810:
+ case 811:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableAndLockTypes
-//line sql.y:4369
+//line sql.y:4361
{
yyLOCAL = TableAndLockTypes{yyDollar[1].tableAndLockTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 811:
+ case 812:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4373
+//line sql.y:4365
{
yySLICE := (*TableAndLockTypes)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableAndLockTypeUnion())
}
- case 812:
+ case 813:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *TableAndLockType
-//line sql.y:4379
+//line sql.y:4371
{
yyLOCAL = &TableAndLockType{Table: yyDollar[1].aliasedTableNameUnion(), Lock: yyDollar[2].lockTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 813:
+ case 814:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4385
+//line sql.y:4377
{
yyLOCAL = Read
}
yyVAL.union = yyLOCAL
- case 814:
+ case 815:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4389
+//line sql.y:4381
{
yyLOCAL = ReadLocal
}
yyVAL.union = yyLOCAL
- case 815:
+ case 816:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4393
+//line sql.y:4385
{
yyLOCAL = Write
}
yyVAL.union = yyLOCAL
- case 816:
+ case 817:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL LockType
-//line sql.y:4397
+//line sql.y:4389
{
yyLOCAL = LowPriorityWrite
}
yyVAL.union = yyLOCAL
- case 817:
+ case 818:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4403
+//line sql.y:4395
{
yyLOCAL = &UnlockTables{}
}
yyVAL.union = yyLOCAL
- case 818:
+ case 819:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4409
+//line sql.y:4401
{
yyLOCAL = &RevertMigration{Comments: Comments(yyDollar[2].strs).Parsed(), UUID: string(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 819:
+ case 820:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4415
+//line sql.y:4407
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), FlushOptions: yyDollar[3].strs}
}
yyVAL.union = yyLOCAL
- case 820:
+ case 821:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4419
+//line sql.y:4411
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 821:
+ case 822:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4423
+//line sql.y:4415
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), WithLock: true}
}
yyVAL.union = yyLOCAL
- case 822:
+ case 823:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4427
+//line sql.y:4419
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion()}
}
yyVAL.union = yyLOCAL
- case 823:
+ case 824:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4431
+//line sql.y:4423
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), WithLock: true}
}
yyVAL.union = yyLOCAL
- case 824:
+ case 825:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4435
+//line sql.y:4427
{
yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), ForExport: true}
}
yyVAL.union = yyLOCAL
- case 825:
+ case 826:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4441
+//line sql.y:4433
{
yyVAL.strs = []string{yyDollar[1].str}
}
- case 826:
+ case 827:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4445
+//line sql.y:4437
{
yyVAL.strs = append(yyDollar[1].strs, yyDollar[3].str)
}
- case 827:
- yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4451
- {
- yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
- }
case 828:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4455
+//line sql.y:4443
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
case 829:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4459
+//line sql.y:4447
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
case 830:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4463
+//line sql.y:4451
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
case 831:
- yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4467
+ yyDollar = yyS[yypt-2 : yypt+1]
+//line sql.y:4455
{
- yyVAL.str = string(yyDollar[1].str)
+ yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
case 832:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4471
+//line sql.y:4459
{
yyVAL.str = string(yyDollar[1].str)
}
case 833:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4475
+//line sql.y:4463
{
yyVAL.str = string(yyDollar[1].str)
}
case 834:
+ yyDollar = yyS[yypt-1 : yypt+1]
+//line sql.y:4467
+ {
+ yyVAL.str = string(yyDollar[1].str)
+ }
+ case 835:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4479
+//line sql.y:4471
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + yyDollar[3].str
}
- case 835:
+ case 836:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4483
+//line sql.y:4475
{
yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str)
}
- case 836:
+ case 837:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4487
+//line sql.y:4479
{
yyVAL.str = string(yyDollar[1].str)
}
- case 837:
+ case 838:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4491
+//line sql.y:4483
{
yyVAL.str = string(yyDollar[1].str)
}
- case 838:
+ case 839:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4495
+//line sql.y:4487
{
yyVAL.str = string(yyDollar[1].str)
}
- case 839:
+ case 840:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:4500
+//line sql.y:4492
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 840:
+ case 841:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4504
+//line sql.y:4496
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 841:
+ case 842:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4508
+//line sql.y:4500
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 842:
+ case 843:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4513
+//line sql.y:4505
{
yyVAL.str = ""
}
- case 843:
+ case 844:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4517
+//line sql.y:4509
{
yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String()
}
- case 844:
+ case 845:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4522
+//line sql.y:4514
{
setAllowComments(yylex, true)
}
- case 845:
+ case 846:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4526
+//line sql.y:4518
{
yyVAL.strs = yyDollar[2].strs
setAllowComments(yylex, false)
}
- case 846:
+ case 847:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4532
+//line sql.y:4524
{
yyVAL.strs = nil
}
- case 847:
+ case 848:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4536
+//line sql.y:4528
{
yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str)
}
- case 848:
+ case 849:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4542
+//line sql.y:4534
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 849:
+ case 850:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:4546
+//line sql.y:4538
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 850:
+ case 851:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:4550
+//line sql.y:4542
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 851:
+ case 852:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4555
+//line sql.y:4547
{
yyVAL.str = ""
}
- case 852:
+ case 853:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4559
+//line sql.y:4551
{
yyVAL.str = SQLNoCacheStr
}
- case 853:
+ case 854:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4563
+//line sql.y:4555
{
yyVAL.str = SQLCacheStr
}
- case 854:
+ case 855:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:4568
+//line sql.y:4560
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 855:
+ case 856:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4572
+//line sql.y:4564
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 856:
+ case 857:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:4576
+//line sql.y:4568
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 857:
+ case 858:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4582
+//line sql.y:4574
{
yyLOCAL = &PrepareStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Statement: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 858:
+ case 859:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4586
+//line sql.y:4578
{
yyLOCAL = &PrepareStmt{
Name: yyDollar[3].identifierCI,
@@ -15257,595 +15184,595 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 859:
+ case 860:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4596
+//line sql.y:4588
{
yyLOCAL = &ExecuteStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Arguments: yyDollar[4].variablesUnion()}
}
yyVAL.union = yyLOCAL
- case 860:
+ case 861:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*Variable
-//line sql.y:4601
+//line sql.y:4593
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 861:
+ case 862:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []*Variable
-//line sql.y:4605
+//line sql.y:4597
{
yyLOCAL = yyDollar[2].variablesUnion()
}
yyVAL.union = yyLOCAL
- case 862:
+ case 863:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4611
+//line sql.y:4603
{
yyLOCAL = &DeallocateStmt{Type: DeallocateType, Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI}
}
yyVAL.union = yyLOCAL
- case 863:
+ case 864:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Statement
-//line sql.y:4615
+//line sql.y:4607
{
yyLOCAL = &DeallocateStmt{Type: DropType, Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI}
}
yyVAL.union = yyLOCAL
- case 864:
+ case 865:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL SelectExprs
-//line sql.y:4620
+//line sql.y:4612
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 865:
+ case 866:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectExprs
-//line sql.y:4624
+//line sql.y:4616
{
yyLOCAL = yyDollar[1].selectExprsUnion()
}
yyVAL.union = yyLOCAL
- case 866:
+ case 867:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4629
+//line sql.y:4621
{
yyVAL.strs = nil
}
- case 867:
+ case 868:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4633
+//line sql.y:4625
{
yyVAL.strs = []string{yyDollar[1].str}
}
- case 868:
+ case 869:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4637
+//line sql.y:4629
{ // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce'
yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str}
}
- case 869:
+ case 870:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4641
+//line sql.y:4633
{
yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str}
}
- case 870:
+ case 871:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:4645
+//line sql.y:4637
{
yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str}
}
- case 871:
+ case 872:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4651
+//line sql.y:4643
{
yyVAL.str = SQLNoCacheStr
}
- case 872:
+ case 873:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4655
+//line sql.y:4647
{
yyVAL.str = SQLCacheStr
}
- case 873:
+ case 874:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4659
+//line sql.y:4651
{
yyVAL.str = DistinctStr
}
- case 874:
+ case 875:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4663
+//line sql.y:4655
{
yyVAL.str = DistinctStr
}
- case 875:
+ case 876:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4667
+//line sql.y:4659
{
yyVAL.str = StraightJoinHint
}
- case 876:
+ case 877:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4671
+//line sql.y:4663
{
yyVAL.str = SQLCalcFoundRowsStr
}
- case 877:
+ case 878:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4675
+//line sql.y:4667
{
yyVAL.str = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. But this is OK, since it's redundant anyway
}
- case 878:
+ case 879:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectExprs
-//line sql.y:4681
+//line sql.y:4673
{
yyLOCAL = SelectExprs{yyDollar[1].selectExprUnion()}
}
yyVAL.union = yyLOCAL
- case 879:
+ case 880:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4685
+//line sql.y:4677
{
yySLICE := (*SelectExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].selectExprUnion())
}
- case 880:
+ case 881:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4691
+//line sql.y:4683
{
yyLOCAL = &StarExpr{}
}
yyVAL.union = yyLOCAL
- case 881:
+ case 882:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4695
+//line sql.y:4687
{
yyLOCAL = &AliasedExpr{Expr: yyDollar[1].exprUnion(), As: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 882:
+ case 883:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4699
+//line sql.y:4691
{
yyLOCAL = &StarExpr{TableName: TableName{Name: yyDollar[1].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 883:
+ case 884:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL SelectExpr
-//line sql.y:4703
+//line sql.y:4695
{
yyLOCAL = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}}
}
yyVAL.union = yyLOCAL
- case 884:
+ case 885:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4708
+//line sql.y:4700
{
yyVAL.identifierCI = IdentifierCI{}
}
- case 885:
+ case 886:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4712
+//line sql.y:4704
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 886:
+ case 887:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4716
+//line sql.y:4708
{
yyVAL.identifierCI = yyDollar[2].identifierCI
}
- case 888:
+ case 889:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4723
+//line sql.y:4715
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 889:
+ case 890:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4728
+//line sql.y:4720
{
yyLOCAL = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewIdentifierCS("dual")}}}
}
yyVAL.union = yyLOCAL
- case 890:
+ case 891:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4732
+//line sql.y:4724
{
yyLOCAL = yyDollar[1].tableExprsUnion()
}
yyVAL.union = yyLOCAL
- case 891:
+ case 892:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4738
+//line sql.y:4730
{
yyLOCAL = yyDollar[2].tableExprsUnion()
}
yyVAL.union = yyLOCAL
- case 892:
+ case 893:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExprs
-//line sql.y:4744
+//line sql.y:4736
{
yyLOCAL = TableExprs{yyDollar[1].tableExprUnion()}
}
yyVAL.union = yyLOCAL
- case 893:
+ case 894:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4748
+//line sql.y:4740
{
yySLICE := (*TableExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].tableExprUnion())
}
- case 896:
+ case 897:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4758
+//line sql.y:4750
{
yyLOCAL = yyDollar[1].aliasedTableNameUnion()
}
yyVAL.union = yyLOCAL
- case 897:
+ case 898:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4762
+//line sql.y:4754
{
yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].derivedTableUnion(), As: yyDollar[3].identifierCS, Columns: yyDollar[4].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 898:
+ case 899:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4766
+//line sql.y:4758
{
yyLOCAL = &ParenTableExpr{Exprs: yyDollar[2].tableExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 899:
+ case 900:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4770
+//line sql.y:4762
{
yyLOCAL = yyDollar[1].tableExprUnion()
}
yyVAL.union = yyLOCAL
- case 900:
+ case 901:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *DerivedTable
-//line sql.y:4776
+//line sql.y:4768
{
yyLOCAL = &DerivedTable{Lateral: false, Select: yyDollar[1].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 901:
+ case 902:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *DerivedTable
-//line sql.y:4780
+//line sql.y:4772
{
yyLOCAL = &DerivedTable{Lateral: true, Select: yyDollar[2].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 902:
+ case 903:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *AliasedTableExpr
-//line sql.y:4786
+//line sql.y:4778
{
yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].identifierCS, Hints: yyDollar[3].indexHintsUnion()}
}
yyVAL.union = yyLOCAL
- case 903:
+ case 904:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL *AliasedTableExpr
-//line sql.y:4790
+//line sql.y:4782
{
yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitionsUnion(), As: yyDollar[6].identifierCS, Hints: yyDollar[7].indexHintsUnion()}
}
yyVAL.union = yyLOCAL
- case 904:
+ case 905:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4795
+//line sql.y:4787
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 905:
+ case 906:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4799
+//line sql.y:4791
{
yyLOCAL = yyDollar[2].columnsUnion()
}
yyVAL.union = yyLOCAL
- case 906:
+ case 907:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4804
+//line sql.y:4796
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 907:
+ case 908:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4808
+//line sql.y:4800
{
yyLOCAL = yyDollar[1].columnsUnion()
}
yyVAL.union = yyLOCAL
- case 908:
+ case 909:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4814
+//line sql.y:4806
{
yyLOCAL = Columns{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 909:
+ case 910:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4818
+//line sql.y:4810
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 910:
+ case 911:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*Variable
-//line sql.y:4824
+//line sql.y:4816
{
yyLOCAL = []*Variable{yyDollar[1].variableUnion()}
}
yyVAL.union = yyLOCAL
- case 911:
+ case 912:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4828
+//line sql.y:4820
{
yySLICE := (*[]*Variable)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].variableUnion())
}
- case 912:
+ case 913:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4834
+//line sql.y:4826
{
yyLOCAL = Columns{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 913:
+ case 914:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:4838
+//line sql.y:4830
{
yyLOCAL = Columns{NewIdentifierCI(string(yyDollar[1].str))}
}
yyVAL.union = yyLOCAL
- case 914:
+ case 915:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4842
+//line sql.y:4834
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 915:
+ case 916:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4846
+//line sql.y:4838
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, NewIdentifierCI(string(yyDollar[3].str)))
}
- case 916:
+ case 917:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Partitions
-//line sql.y:4852
+//line sql.y:4844
{
yyLOCAL = Partitions{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 917:
+ case 918:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4856
+//line sql.y:4848
{
yySLICE := (*Partitions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 918:
+ case 919:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4869
+//line sql.y:4861
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition}
}
yyVAL.union = yyLOCAL
- case 919:
+ case 920:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4873
+//line sql.y:4865
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition}
}
yyVAL.union = yyLOCAL
- case 920:
+ case 921:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4877
+//line sql.y:4869
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition}
}
yyVAL.union = yyLOCAL
- case 921:
+ case 922:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL TableExpr
-//line sql.y:4881
+//line sql.y:4873
{
yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion()}
}
yyVAL.union = yyLOCAL
- case 922:
+ case 923:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4887
+//line sql.y:4879
{
yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()}
}
- case 923:
+ case 924:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:4889
+//line sql.y:4881
{
yyVAL.joinCondition = &JoinCondition{Using: yyDollar[3].columnsUnion()}
}
- case 924:
+ case 925:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4893
+//line sql.y:4885
{
yyVAL.joinCondition = &JoinCondition{}
}
- case 925:
+ case 926:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4895
+//line sql.y:4887
{
yyVAL.joinCondition = yyDollar[1].joinCondition
}
- case 926:
+ case 927:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4899
+//line sql.y:4891
{
yyVAL.joinCondition = &JoinCondition{}
}
- case 927:
+ case 928:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4901
+//line sql.y:4893
{
yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()}
}
- case 928:
+ case 929:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4904
+//line sql.y:4896
{
yyVAL.empty = struct{}{}
}
- case 929:
+ case 930:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4906
+//line sql.y:4898
{
yyVAL.empty = struct{}{}
}
- case 930:
+ case 931:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:4909
+//line sql.y:4901
{
yyVAL.identifierCS = NewIdentifierCS("")
}
- case 931:
+ case 932:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4913
+//line sql.y:4905
{
yyVAL.identifierCS = yyDollar[1].identifierCS
}
- case 932:
+ case 933:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4917
+//line sql.y:4909
{
yyVAL.identifierCS = yyDollar[2].identifierCS
}
- case 934:
+ case 935:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4924
+//line sql.y:4916
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 935:
+ case 936:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4930
+//line sql.y:4922
{
yyLOCAL = NormalJoinType
}
yyVAL.union = yyLOCAL
- case 936:
+ case 937:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4934
+//line sql.y:4926
{
yyLOCAL = NormalJoinType
}
yyVAL.union = yyLOCAL
- case 937:
+ case 938:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4938
+//line sql.y:4930
{
yyLOCAL = NormalJoinType
}
yyVAL.union = yyLOCAL
- case 938:
+ case 939:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4944
+//line sql.y:4936
{
yyLOCAL = StraightJoinType
}
yyVAL.union = yyLOCAL
- case 939:
+ case 940:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4950
+//line sql.y:4942
{
yyLOCAL = LeftJoinType
}
yyVAL.union = yyLOCAL
- case 940:
+ case 941:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4954
+//line sql.y:4946
{
yyLOCAL = LeftJoinType
}
yyVAL.union = yyLOCAL
- case 941:
+ case 942:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4958
+//line sql.y:4950
{
yyLOCAL = RightJoinType
}
yyVAL.union = yyLOCAL
- case 942:
+ case 943:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4962
+//line sql.y:4954
{
yyLOCAL = RightJoinType
}
yyVAL.union = yyLOCAL
- case 943:
+ case 944:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4968
+//line sql.y:4960
{
yyLOCAL = NaturalJoinType
}
yyVAL.union = yyLOCAL
- case 944:
+ case 945:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL JoinType
-//line sql.y:4972
+//line sql.y:4964
{
if yyDollar[2].joinTypeUnion() == LeftJoinType {
yyLOCAL = NaturalLeftJoinType
@@ -15854,593 +15781,593 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 945:
+ case 946:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:4982
+//line sql.y:4974
{
yyVAL.tableName = yyDollar[2].tableName
}
- case 946:
+ case 947:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4986
+//line sql.y:4978
{
yyVAL.tableName = yyDollar[1].tableName
}
- case 947:
+ case 948:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:4992
+//line sql.y:4984
{
yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS}
}
- case 948:
+ case 949:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:4996
+//line sql.y:4988
{
yyVAL.tableName = TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}
}
- case 949:
+ case 950:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5002
+//line sql.y:4994
{
yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS}
}
- case 950:
+ case 951:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL IndexHints
-//line sql.y:5007
+//line sql.y:4999
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 951:
+ case 952:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IndexHints
-//line sql.y:5011
+//line sql.y:5003
{
yyLOCAL = yyDollar[1].indexHintsUnion()
}
yyVAL.union = yyLOCAL
- case 952:
+ case 953:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IndexHints
-//line sql.y:5017
+//line sql.y:5009
{
yyLOCAL = IndexHints{yyDollar[1].indexHintUnion()}
}
yyVAL.union = yyLOCAL
- case 953:
+ case 954:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:5021
+//line sql.y:5013
{
yySLICE := (*IndexHints)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].indexHintUnion())
}
- case 954:
+ case 955:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5027
+//line sql.y:5019
{
yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 955:
+ case 956:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5031
+//line sql.y:5023
{
yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 956:
+ case 957:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5035
+//line sql.y:5027
{
yyLOCAL = &IndexHint{Type: IgnoreOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 957:
+ case 958:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL *IndexHint
-//line sql.y:5039
+//line sql.y:5031
{
yyLOCAL = &IndexHint{Type: ForceOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()}
}
yyVAL.union = yyLOCAL
- case 958:
+ case 959:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5044
+//line sql.y:5036
{
yyLOCAL = NoForType
}
yyVAL.union = yyLOCAL
- case 959:
+ case 960:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5048
+//line sql.y:5040
{
yyLOCAL = JoinForType
}
yyVAL.union = yyLOCAL
- case 960:
+ case 961:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5052
+//line sql.y:5044
{
yyLOCAL = OrderByForType
}
yyVAL.union = yyLOCAL
- case 961:
+ case 962:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL IndexHintForType
-//line sql.y:5056
+//line sql.y:5048
{
yyLOCAL = GroupByForType
}
yyVAL.union = yyLOCAL
- case 962:
+ case 963:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5062
+//line sql.y:5054
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 963:
+ case 964:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5066
+//line sql.y:5058
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 964:
+ case 965:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5073
+//line sql.y:5065
{
yyLOCAL = &OrExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 965:
+ case 966:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5077
+//line sql.y:5069
{
yyLOCAL = &XorExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 966:
+ case 967:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5081
+//line sql.y:5073
{
yyLOCAL = &AndExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 967:
+ case 968:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5085
+//line sql.y:5077
{
yyLOCAL = &NotExpr{Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 968:
+ case 969:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5089
+//line sql.y:5081
{
yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].isExprOperatorUnion()}
}
yyVAL.union = yyLOCAL
- case 969:
+ case 970:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5093
+//line sql.y:5085
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 970:
+ case 971:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5097
+//line sql.y:5089
{
yyLOCAL = &MemberOfExpr{Value: yyDollar[1].exprUnion(), JSONArr: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 971:
+ case 972:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5103
+//line sql.y:5095
{
yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNullOp}
}
yyVAL.union = yyLOCAL
- case 972:
+ case 973:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5107
+//line sql.y:5099
{
yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNotNullOp}
}
yyVAL.union = yyLOCAL
- case 973:
+ case 974:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5111
+//line sql.y:5103
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: yyDollar[2].comparisonExprOperatorUnion(), Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 974:
+ case 975:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5115
+//line sql.y:5107
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 975:
+ case 976:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5121
+//line sql.y:5113
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: InOp, Right: yyDollar[3].colTupleUnion()}
}
yyVAL.union = yyLOCAL
- case 976:
+ case 977:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5125
+//line sql.y:5117
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotInOp, Right: yyDollar[4].colTupleUnion()}
}
yyVAL.union = yyLOCAL
- case 977:
+ case 978:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5129
+//line sql.y:5121
{
yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: true, From: yyDollar[3].exprUnion(), To: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 978:
+ case 979:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5133
+//line sql.y:5125
{
yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: false, From: yyDollar[4].exprUnion(), To: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 979:
+ case 980:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5137
+//line sql.y:5129
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 980:
+ case 981:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5141
+//line sql.y:5133
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 981:
+ case 982:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5145
+//line sql.y:5137
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion(), Escape: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 982:
+ case 983:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5149
+//line sql.y:5141
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion(), Escape: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 983:
+ case 984:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5153
+//line sql.y:5145
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: RegexpOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 984:
+ case 985:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5157
+//line sql.y:5149
{
yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotRegexpOp, Right: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 985:
+ case 986:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5161
+//line sql.y:5153
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 986:
+ case 987:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:5167
+//line sql.y:5159
{
}
- case 987:
+ case 988:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:5170
+//line sql.y:5162
{
}
- case 988:
+ case 989:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5176
+//line sql.y:5168
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitOrOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 989:
+ case 990:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5180
+//line sql.y:5172
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitAndOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 990:
+ case 991:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5184
+//line sql.y:5176
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftLeftOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 991:
+ case 992:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5188
+//line sql.y:5180
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftRightOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 992:
+ case 993:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5192
+//line sql.y:5184
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: PlusOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 993:
+ case 994:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5196
+//line sql.y:5188
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MinusOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 994:
+ case 995:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5200
+//line sql.y:5192
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MultOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 995:
+ case 996:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5204
+//line sql.y:5196
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: DivOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 996:
+ case 997:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5208
+//line sql.y:5200
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 997:
+ case 998:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5212
+//line sql.y:5204
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: IntDivOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 998:
+ case 999:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5216
+//line sql.y:5208
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 999:
+ case 1000:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5220
+//line sql.y:5212
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitXorOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1000:
+ case 1001:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5224
+//line sql.y:5216
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1001:
+ case 1002:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5230
+//line sql.y:5222
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1002:
+ case 1003:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5234
+//line sql.y:5226
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1003:
+ case 1004:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5238
+//line sql.y:5230
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1004:
+ case 1005:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5242
+//line sql.y:5234
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1005:
+ case 1006:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5246
+//line sql.y:5238
{
yyLOCAL = &CollateExpr{Expr: yyDollar[1].exprUnion(), Collation: yyDollar[3].str}
}
yyVAL.union = yyLOCAL
- case 1006:
+ case 1007:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5250
+//line sql.y:5242
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1007:
+ case 1008:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5254
+//line sql.y:5246
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1008:
+ case 1009:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5258
+//line sql.y:5250
{
yyLOCAL = yyDollar[1].variableUnion()
}
yyVAL.union = yyLOCAL
- case 1009:
+ case 1010:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5262
+//line sql.y:5254
{
yyLOCAL = yyDollar[2].exprUnion() // TODO: do we really want to ignore unary '+' before any kind of literals?
}
yyVAL.union = yyLOCAL
- case 1010:
+ case 1011:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5266
+//line sql.y:5258
{
yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1011:
+ case 1012:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5270
+//line sql.y:5262
{
yyLOCAL = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1012:
+ case 1013:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5274
+//line sql.y:5266
{
yyLOCAL = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1013:
+ case 1014:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5278
+//line sql.y:5270
{
yyLOCAL = yyDollar[1].subqueryUnion()
}
yyVAL.union = yyLOCAL
- case 1014:
+ case 1015:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5282
+//line sql.y:5274
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1015:
+ case 1016:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5286
+//line sql.y:5278
{
yyLOCAL = &ExistsExpr{Subquery: yyDollar[2].subqueryUnion()}
}
yyVAL.union = yyLOCAL
- case 1016:
+ case 1017:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5290
+//line sql.y:5282
{
yyLOCAL = &MatchExpr{Columns: yyDollar[2].colNamesUnion(), Expr: yyDollar[5].exprUnion(), Option: yyDollar[6].matchExprOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1017:
+ case 1018:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5294
+//line sql.y:5286
{
yyLOCAL = &CastExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion(), Array: yyDollar[6].booleanUnion()}
}
yyVAL.union = yyLOCAL
- case 1018:
+ case 1019:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5298
+//line sql.y:5290
{
yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1019:
+ case 1020:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5302
+//line sql.y:5294
{
yyLOCAL = &ConvertUsingExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].str}
}
yyVAL.union = yyLOCAL
- case 1020:
+ case 1021:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5306
+//line sql.y:5298
{
// From: https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#operator_binary
// To convert a string expression to a binary string, these constructs are equivalent:
@@ -16449,18 +16376,18 @@ yydefault:
yyLOCAL = &ConvertExpr{Expr: yyDollar[2].exprUnion(), Type: &ConvertType{Type: yyDollar[1].str}}
}
yyVAL.union = yyLOCAL
- case 1021:
+ case 1022:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5314
+//line sql.y:5306
{
yyLOCAL = &Default{ColName: yyDollar[2].str}
}
yyVAL.union = yyLOCAL
- case 1022:
+ case 1023:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5318
+//line sql.y:5310
{
// INTERVAL can trigger a shift / reduce conflict. We want
// to shift here for the interval rule. In case we do have
@@ -16469,2192 +16396,2192 @@ yydefault:
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1023:
+ case 1024:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5326
+//line sql.y:5318
{
yyLOCAL = &IntervalFuncExpr{Expr: yyDollar[3].exprUnion(), Exprs: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1024:
+ case 1025:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5330
+//line sql.y:5322
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1025:
+ case 1026:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5334
+//line sql.y:5326
{
yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1026:
+ case 1027:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5340
+//line sql.y:5332
{
yyLOCAL = &IntervalExpr{Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].identifierCI.String()}
}
yyVAL.union = yyLOCAL
- case 1027:
+ case 1028:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*ColName
-//line sql.y:5346
+//line sql.y:5338
{
yyLOCAL = yyDollar[1].colNamesUnion()
}
yyVAL.union = yyLOCAL
- case 1028:
+ case 1029:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL []*ColName
-//line sql.y:5350
+//line sql.y:5342
{
yyLOCAL = yyDollar[2].colNamesUnion()
}
yyVAL.union = yyLOCAL
- case 1029:
+ case 1030:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*ColName
-//line sql.y:5356
+//line sql.y:5348
{
yyLOCAL = []*ColName{yyDollar[1].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 1030:
+ case 1031:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5360
+//line sql.y:5352
{
yySLICE := (*[]*ColName)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].colNameUnion())
}
- case 1031:
+ case 1032:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TrimType
-//line sql.y:5366
+//line sql.y:5358
{
yyLOCAL = BothTrimType
}
yyVAL.union = yyLOCAL
- case 1032:
+ case 1033:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TrimType
-//line sql.y:5370
+//line sql.y:5362
{
yyLOCAL = LeadingTrimType
}
yyVAL.union = yyLOCAL
- case 1033:
+ case 1034:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL TrimType
-//line sql.y:5374
+//line sql.y:5366
{
yyLOCAL = TrailingTrimType
}
yyVAL.union = yyLOCAL
- case 1034:
+ case 1035:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FrameUnitType
-//line sql.y:5380
+//line sql.y:5372
{
yyLOCAL = FrameRowsType
}
yyVAL.union = yyLOCAL
- case 1035:
+ case 1036:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FrameUnitType
-//line sql.y:5384
+//line sql.y:5376
{
yyLOCAL = FrameRangeType
}
yyVAL.union = yyLOCAL
- case 1036:
+ case 1037:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5391
+//line sql.y:5383
{
yyLOCAL = CumeDistExprType
}
yyVAL.union = yyLOCAL
- case 1037:
+ case 1038:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5395
+//line sql.y:5387
{
yyLOCAL = DenseRankExprType
}
yyVAL.union = yyLOCAL
- case 1038:
+ case 1039:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5399
+//line sql.y:5391
{
yyLOCAL = PercentRankExprType
}
yyVAL.union = yyLOCAL
- case 1039:
+ case 1040:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5403
+//line sql.y:5395
{
yyLOCAL = RankExprType
}
yyVAL.union = yyLOCAL
- case 1040:
+ case 1041:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ArgumentLessWindowExprType
-//line sql.y:5407
+//line sql.y:5399
{
yyLOCAL = RowNumberExprType
}
yyVAL.union = yyLOCAL
- case 1041:
+ case 1042:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5413
+//line sql.y:5405
{
yyLOCAL = &FramePoint{Type: CurrentRowType}
}
yyVAL.union = yyLOCAL
- case 1042:
+ case 1043:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5417
+//line sql.y:5409
{
yyLOCAL = &FramePoint{Type: UnboundedPrecedingType}
}
yyVAL.union = yyLOCAL
- case 1043:
+ case 1044:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5421
+//line sql.y:5413
{
yyLOCAL = &FramePoint{Type: UnboundedFollowingType}
}
yyVAL.union = yyLOCAL
- case 1044:
+ case 1045:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5425
+//line sql.y:5417
{
yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[1].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1045:
+ case 1046:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FramePoint
-//line sql.y:5429
+//line sql.y:5421
{
yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[1].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1046:
+ case 1047:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5435
+//line sql.y:5427
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1047:
+ case 1048:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5439
+//line sql.y:5431
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1048:
+ case 1049:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5444
+//line sql.y:5436
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1049:
+ case 1050:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5448
+//line sql.y:5440
{
yyLOCAL = yyDollar[1].frameClauseUnion()
}
yyVAL.union = yyLOCAL
- case 1050:
+ case 1051:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5454
+//line sql.y:5446
{
yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[2].framePointUnion()}
}
yyVAL.union = yyLOCAL
- case 1051:
+ case 1052:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *FrameClause
-//line sql.y:5458
+//line sql.y:5450
{
yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[3].framePointUnion(), End: yyDollar[5].framePointUnion()}
}
yyVAL.union = yyLOCAL
- case 1052:
+ case 1053:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:5463
+//line sql.y:5455
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1053:
+ case 1054:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:5467
+//line sql.y:5459
{
yyLOCAL = yyDollar[3].exprsUnion()
}
yyVAL.union = yyLOCAL
- case 1054:
+ case 1055:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:5472
+//line sql.y:5464
{
}
- case 1055:
+ case 1056:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:5475
+//line sql.y:5467
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 1056:
+ case 1057:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *WindowSpecification
-//line sql.y:5481
+//line sql.y:5473
{
yyLOCAL = &WindowSpecification{Name: yyDollar[1].identifierCI, PartitionClause: yyDollar[2].exprsUnion(), OrderClause: yyDollar[3].orderByUnion(), FrameClause: yyDollar[4].frameClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1057:
+ case 1058:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *OverClause
-//line sql.y:5487
+//line sql.y:5479
{
yyLOCAL = &OverClause{WindowSpec: yyDollar[3].windowSpecificationUnion()}
}
yyVAL.union = yyLOCAL
- case 1058:
+ case 1059:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *OverClause
-//line sql.y:5491
+//line sql.y:5483
{
yyLOCAL = &OverClause{WindowName: yyDollar[2].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1059:
+ case 1060:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *NullTreatmentClause
-//line sql.y:5496
+//line sql.y:5488
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1061:
+ case 1062:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *NullTreatmentClause
-//line sql.y:5503
+//line sql.y:5495
{
yyLOCAL = &NullTreatmentClause{yyDollar[1].nullTreatmentTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1062:
+ case 1063:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL NullTreatmentType
-//line sql.y:5509
+//line sql.y:5501
{
yyLOCAL = RespectNullsType
}
yyVAL.union = yyLOCAL
- case 1063:
+ case 1064:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL NullTreatmentType
-//line sql.y:5513
+//line sql.y:5505
{
yyLOCAL = IgnoreNullsType
}
yyVAL.union = yyLOCAL
- case 1064:
+ case 1065:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FirstOrLastValueExprType
-//line sql.y:5519
+//line sql.y:5511
{
yyLOCAL = FirstValueExprType
}
yyVAL.union = yyLOCAL
- case 1065:
+ case 1066:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL FirstOrLastValueExprType
-//line sql.y:5523
+//line sql.y:5515
{
yyLOCAL = LastValueExprType
}
yyVAL.union = yyLOCAL
- case 1066:
+ case 1067:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL FromFirstLastType
-//line sql.y:5529
+//line sql.y:5521
{
yyLOCAL = FromFirstType
}
yyVAL.union = yyLOCAL
- case 1067:
+ case 1068:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL FromFirstLastType
-//line sql.y:5533
+//line sql.y:5525
{
yyLOCAL = FromLastType
}
yyVAL.union = yyLOCAL
- case 1068:
+ case 1069:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *FromFirstLastClause
-//line sql.y:5538
+//line sql.y:5530
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1070:
+ case 1071:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *FromFirstLastClause
-//line sql.y:5545
+//line sql.y:5537
{
yyLOCAL = &FromFirstLastClause{yyDollar[1].fromFirstLastTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1071:
+ case 1072:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LagLeadExprType
-//line sql.y:5551
+//line sql.y:5543
{
yyLOCAL = LagExprType
}
yyVAL.union = yyLOCAL
- case 1072:
+ case 1073:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL LagLeadExprType
-//line sql.y:5555
+//line sql.y:5547
{
yyLOCAL = LeadExprType
}
yyVAL.union = yyLOCAL
- case 1073:
+ case 1074:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *WindowDefinition
-//line sql.y:5561
+//line sql.y:5553
{
yyLOCAL = &WindowDefinition{Name: yyDollar[1].identifierCI, WindowSpec: yyDollar[4].windowSpecificationUnion()}
}
yyVAL.union = yyLOCAL
- case 1074:
+ case 1075:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL WindowDefinitions
-//line sql.y:5567
+//line sql.y:5559
{
yyLOCAL = WindowDefinitions{yyDollar[1].windowDefinitionUnion()}
}
yyVAL.union = yyLOCAL
- case 1075:
+ case 1076:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5571
+//line sql.y:5563
{
yySLICE := (*WindowDefinitions)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].windowDefinitionUnion())
}
- case 1076:
+ case 1077:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:5577
+//line sql.y:5569
{
yyVAL.str = ""
}
- case 1077:
+ case 1078:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5581
+//line sql.y:5573
{
yyVAL.str = string(yyDollar[2].identifierCI.String())
}
- case 1078:
+ case 1079:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL BoolVal
-//line sql.y:5587
+//line sql.y:5579
{
yyLOCAL = BoolVal(true)
}
yyVAL.union = yyLOCAL
- case 1079:
+ case 1080:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL BoolVal
-//line sql.y:5591
+//line sql.y:5583
{
yyLOCAL = BoolVal(false)
}
yyVAL.union = yyLOCAL
- case 1080:
+ case 1081:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5598
+//line sql.y:5590
{
yyLOCAL = IsTrueOp
}
yyVAL.union = yyLOCAL
- case 1081:
+ case 1082:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5602
+//line sql.y:5594
{
yyLOCAL = IsNotTrueOp
}
yyVAL.union = yyLOCAL
- case 1082:
+ case 1083:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5606
+//line sql.y:5598
{
yyLOCAL = IsFalseOp
}
yyVAL.union = yyLOCAL
- case 1083:
+ case 1084:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL IsExprOperator
-//line sql.y:5610
+//line sql.y:5602
{
yyLOCAL = IsNotFalseOp
}
yyVAL.union = yyLOCAL
- case 1084:
+ case 1085:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5616
+//line sql.y:5608
{
yyLOCAL = EqualOp
}
yyVAL.union = yyLOCAL
- case 1085:
+ case 1086:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5620
+//line sql.y:5612
{
yyLOCAL = LessThanOp
}
yyVAL.union = yyLOCAL
- case 1086:
+ case 1087:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5624
+//line sql.y:5616
{
yyLOCAL = GreaterThanOp
}
yyVAL.union = yyLOCAL
- case 1087:
+ case 1088:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5628
+//line sql.y:5620
{
yyLOCAL = LessEqualOp
}
yyVAL.union = yyLOCAL
- case 1088:
+ case 1089:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5632
+//line sql.y:5624
{
yyLOCAL = GreaterEqualOp
}
yyVAL.union = yyLOCAL
- case 1089:
+ case 1090:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5636
+//line sql.y:5628
{
yyLOCAL = NotEqualOp
}
yyVAL.union = yyLOCAL
- case 1090:
+ case 1091:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ComparisonExprOperator
-//line sql.y:5640
+//line sql.y:5632
{
yyLOCAL = NullSafeEqualOp
}
yyVAL.union = yyLOCAL
- case 1091:
+ case 1092:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColTuple
-//line sql.y:5646
+//line sql.y:5638
{
yyLOCAL = yyDollar[1].valTupleUnion()
}
yyVAL.union = yyLOCAL
- case 1092:
+ case 1093:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColTuple
-//line sql.y:5650
+//line sql.y:5642
{
yyLOCAL = yyDollar[1].subqueryUnion()
}
yyVAL.union = yyLOCAL
- case 1093:
+ case 1094:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ColTuple
-//line sql.y:5654
+//line sql.y:5646
{
yyLOCAL = ListArg(yyDollar[1].str[2:])
bindVariable(yylex, yyDollar[1].str[2:])
}
yyVAL.union = yyLOCAL
- case 1094:
+ case 1095:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Subquery
-//line sql.y:5661
+//line sql.y:5653
{
yyLOCAL = &Subquery{yyDollar[1].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 1095:
+ case 1096:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:5667
+//line sql.y:5659
{
yyLOCAL = Exprs{yyDollar[1].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1096:
+ case 1097:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:5671
+//line sql.y:5663
{
yySLICE := (*Exprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].exprUnion())
}
- case 1097:
+ case 1098:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5681
+//line sql.y:5673
{
yyLOCAL = &FuncExpr{Name: yyDollar[1].identifierCI, Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1098:
+ case 1099:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5685
+//line sql.y:5677
{
yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCI, Exprs: yyDollar[5].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1099:
+ case 1100:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5695
+//line sql.y:5687
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1100:
+ case 1101:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5699
+//line sql.y:5691
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1101:
+ case 1102:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5703
+//line sql.y:5695
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1102:
+ case 1103:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5707
+//line sql.y:5699
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1103:
+ case 1104:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5711
+//line sql.y:5703
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1104:
+ case 1105:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5715
+//line sql.y:5707
{
yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1105:
+ case 1106:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5719
+//line sql.y:5711
{
yyLOCAL = &CaseExpr{Expr: yyDollar[2].exprUnion(), Whens: yyDollar[3].whensUnion(), Else: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1106:
+ case 1107:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5723
+//line sql.y:5715
{
yyLOCAL = &ValuesFuncExpr{Name: yyDollar[3].colNameUnion()}
}
yyVAL.union = yyLOCAL
- case 1107:
+ case 1108:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5727
+//line sql.y:5719
{
yyLOCAL = &InsertExpr{Str: yyDollar[3].exprUnion(), Pos: yyDollar[5].exprUnion(), Len: yyDollar[7].exprUnion(), NewStr: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1108:
+ case 1109:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5731
+//line sql.y:5723
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1109:
+ case 1110:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5742
+//line sql.y:5734
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("utc_date")}
}
yyVAL.union = yyLOCAL
- case 1110:
+ case 1111:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5746
+//line sql.y:5738
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1111:
+ case 1112:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5752
+//line sql.y:5744
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("current_date")}
}
yyVAL.union = yyLOCAL
- case 1112:
+ case 1113:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5756
+//line sql.y:5748
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_time"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1113:
+ case 1114:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5761
+//line sql.y:5753
{
yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1114:
+ case 1115:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5765
+//line sql.y:5757
{
yyLOCAL = &CountStar{}
}
yyVAL.union = yyLOCAL
- case 1115:
+ case 1116:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5769
+//line sql.y:5761
{
yyLOCAL = &Count{Distinct: yyDollar[3].booleanUnion(), Args: yyDollar[4].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1116:
+ case 1117:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5773
+//line sql.y:5765
{
yyLOCAL = &Max{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1117:
+ case 1118:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5777
+//line sql.y:5769
{
yyLOCAL = &Min{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1118:
+ case 1119:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5781
+//line sql.y:5773
{
yyLOCAL = &Sum{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1119:
+ case 1120:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5785
+//line sql.y:5777
{
yyLOCAL = &Avg{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1120:
+ case 1121:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5789
+//line sql.y:5781
{
yyLOCAL = &BitAnd{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1121:
+ case 1122:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5793
+//line sql.y:5785
{
yyLOCAL = &BitOr{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1122:
+ case 1123:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5797
+//line sql.y:5789
{
yyLOCAL = &BitXor{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1123:
+ case 1124:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5801
+//line sql.y:5793
{
yyLOCAL = &Std{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1124:
+ case 1125:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5805
+//line sql.y:5797
{
yyLOCAL = &StdDev{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1125:
+ case 1126:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5809
+//line sql.y:5801
{
yyLOCAL = &StdPop{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1126:
+ case 1127:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5813
+//line sql.y:5805
{
yyLOCAL = &StdSamp{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1127:
+ case 1128:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5817
+//line sql.y:5809
{
yyLOCAL = &VarPop{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1128:
+ case 1129:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5821
+//line sql.y:5813
{
yyLOCAL = &VarSamp{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1129:
+ case 1130:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5825
+//line sql.y:5817
{
yyLOCAL = &Variance{Arg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1130:
+ case 1131:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5829
+//line sql.y:5821
{
yyLOCAL = &GroupConcatExpr{Distinct: yyDollar[3].booleanUnion(), Exprs: yyDollar[4].exprsUnion(), OrderBy: yyDollar[5].orderByUnion(), Separator: yyDollar[6].str, Limit: yyDollar[7].limitUnion()}
}
yyVAL.union = yyLOCAL
- case 1131:
+ case 1132:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5833
+//line sql.y:5825
{
yyLOCAL = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1132:
+ case 1133:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5837
+//line sql.y:5829
{
yyLOCAL = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1133:
+ case 1134:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5841
+//line sql.y:5833
{
yyLOCAL = &ExtractFuncExpr{IntervalTypes: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1134:
+ case 1135:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5845
+//line sql.y:5837
{
yyLOCAL = &WeightStringFuncExpr{Expr: yyDollar[3].exprUnion(), As: yyDollar[4].convertTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1135:
+ case 1136:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5849
+//line sql.y:5841
{
yyLOCAL = &JSONPrettyExpr{JSONVal: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1136:
+ case 1137:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5853
+//line sql.y:5845
{
yyLOCAL = &JSONStorageFreeExpr{JSONVal: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1137:
+ case 1138:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5857
+//line sql.y:5849
{
yyLOCAL = &JSONStorageSizeExpr{JSONVal: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1138:
+ case 1139:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5861
+//line sql.y:5853
{
yyLOCAL = &TrimFuncExpr{TrimFuncType: LTrimType, StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1139:
+ case 1140:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5865
+//line sql.y:5857
{
yyLOCAL = &TrimFuncExpr{TrimFuncType: RTrimType, StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1140:
+ case 1141:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5869
+//line sql.y:5861
{
yyLOCAL = &TrimFuncExpr{Type: yyDollar[3].trimTypeUnion(), TrimArg: yyDollar[4].exprUnion(), StringArg: yyDollar[6].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1141:
+ case 1142:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5873
+//line sql.y:5865
{
yyLOCAL = &TrimFuncExpr{StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1142:
+ case 1143:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5877
+//line sql.y:5869
{
yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1143:
+ case 1144:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5881
+//line sql.y:5873
{
yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion(), Charset: yyDollar[5].str}
}
yyVAL.union = yyLOCAL
- case 1144:
+ case 1145:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5885
+//line sql.y:5877
{
yyLOCAL = &TrimFuncExpr{TrimArg: yyDollar[3].exprUnion(), StringArg: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1145:
+ case 1146:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5889
+//line sql.y:5881
{
yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1146:
+ case 1147:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5893
+//line sql.y:5885
{
yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion(), Pos: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1147:
+ case 1148:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5897
+//line sql.y:5889
{
yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1148:
+ case 1149:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5901
+//line sql.y:5893
{
yyLOCAL = &LockingFunc{Type: GetLock, Name: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1149:
+ case 1150:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5905
+//line sql.y:5897
{
yyLOCAL = &LockingFunc{Type: IsFreeLock, Name: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1150:
+ case 1151:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5909
+//line sql.y:5901
{
yyLOCAL = &LockingFunc{Type: IsUsedLock, Name: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1151:
+ case 1152:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5913
+//line sql.y:5905
{
yyLOCAL = &LockingFunc{Type: ReleaseAllLocks}
}
yyVAL.union = yyLOCAL
- case 1152:
+ case 1153:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5917
+//line sql.y:5909
{
yyLOCAL = &LockingFunc{Type: ReleaseLock, Name: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1153:
+ case 1154:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5921
+//line sql.y:5913
{
yyLOCAL = &JSONSchemaValidFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1154:
+ case 1155:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5925
+//line sql.y:5917
{
yyLOCAL = &JSONSchemaValidationReportFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1155:
+ case 1156:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5929
+//line sql.y:5921
{
yyLOCAL = &JSONArrayExpr{Params: yyDollar[3].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1156:
+ case 1157:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5933
+//line sql.y:5925
{
yyLOCAL = &JSONObjectExpr{Params: yyDollar[3].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1157:
+ case 1158:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5937
+//line sql.y:5929
{
yyLOCAL = &JSONQuoteExpr{StringArg: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1158:
+ case 1159:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5941
+//line sql.y:5933
{
yyLOCAL = &JSONContainsExpr{Target: yyDollar[3].exprUnion(), Candidate: yyDollar[5].exprsUnion()[0], PathList: yyDollar[5].exprsUnion()[1:]}
}
yyVAL.union = yyLOCAL
- case 1159:
+ case 1160:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5945
+//line sql.y:5937
{
yyLOCAL = &JSONContainsPathExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), PathList: yyDollar[7].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1160:
+ case 1161:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5949
+//line sql.y:5941
{
yyLOCAL = &JSONExtractExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1161:
+ case 1162:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5953
+//line sql.y:5945
{
yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1162:
+ case 1163:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5957
+//line sql.y:5949
{
yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1163:
+ case 1164:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5961
+//line sql.y:5953
{
yyLOCAL = &JSONOverlapsExpr{JSONDoc1: yyDollar[3].exprUnion(), JSONDoc2: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1164:
+ case 1165:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5965
+//line sql.y:5957
{
yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1165:
+ case 1166:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5969
+//line sql.y:5961
{
yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion(), EscapeChar: yyDollar[9].exprsUnion()[0], PathList: yyDollar[9].exprsUnion()[1:]}
}
yyVAL.union = yyLOCAL
- case 1166:
+ case 1167:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5973
+//line sql.y:5965
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion()}
}
yyVAL.union = yyLOCAL
- case 1167:
+ case 1168:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5977
+//line sql.y:5969
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()}
}
yyVAL.union = yyLOCAL
- case 1168:
+ case 1169:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5981
+//line sql.y:5973
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()}
}
yyVAL.union = yyLOCAL
- case 1169:
+ case 1170:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5985
+//line sql.y:5977
{
yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()}
}
yyVAL.union = yyLOCAL
- case 1170:
+ case 1171:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5989
+//line sql.y:5981
{
yyLOCAL = &JSONAttributesExpr{Type: DepthAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1171:
+ case 1172:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5993
+//line sql.y:5985
{
yyLOCAL = &JSONAttributesExpr{Type: ValidAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1172:
+ case 1173:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:5997
+//line sql.y:5989
{
yyLOCAL = &JSONAttributesExpr{Type: TypeAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1173:
+ case 1174:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6001
+//line sql.y:5993
{
yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1174:
+ case 1175:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6005
+//line sql.y:5997
{
yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1175:
+ case 1176:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6009
+//line sql.y:6001
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayAppendType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1176:
+ case 1177:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6013
+//line sql.y:6005
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1177:
+ case 1178:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6017
+//line sql.y:6009
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1178:
+ case 1179:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6021
+//line sql.y:6013
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONReplaceType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1179:
+ case 1180:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6025
+//line sql.y:6017
{
yyLOCAL = &JSONValueModifierExpr{Type: JSONSetType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()}
}
yyVAL.union = yyLOCAL
- case 1180:
+ case 1181:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6029
+//line sql.y:6021
{
yyLOCAL = &JSONValueMergeExpr{Type: JSONMergeType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1181:
+ case 1182:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6033
+//line sql.y:6025
{
yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePatchType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1182:
+ case 1183:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6037
+//line sql.y:6029
{
yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePreserveType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1183:
+ case 1184:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6041
+//line sql.y:6033
{
yyLOCAL = &JSONRemoveExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1184:
+ case 1185:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6045
+//line sql.y:6037
{
yyLOCAL = &JSONUnquoteExpr{JSONValue: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1185:
+ case 1186:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6049
+//line sql.y:6041
{
yyLOCAL = &ArgumentLessWindowExpr{Type: yyDollar[1].argumentLessWindowExprTypeUnion(), OverClause: yyDollar[4].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1186:
+ case 1187:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6053
+//line sql.y:6045
{
yyLOCAL = &FirstOrLastValueExpr{Type: yyDollar[1].firstOrLastValueExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1187:
+ case 1188:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6057
+//line sql.y:6049
{
yyLOCAL = &NtileExpr{N: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1188:
+ case 1189:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6061
+//line sql.y:6053
{
yyLOCAL = &NTHValueExpr{Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), FromFirstLastClause: yyDollar[7].fromFirstLastClauseUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1189:
+ case 1190:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6065
+//line sql.y:6057
{
yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1190:
+ case 1191:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6069
+//line sql.y:6061
{
yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), Default: yyDollar[6].exprUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()}
}
yyVAL.union = yyLOCAL
- case 1195:
+ case 1196:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6079
+//line sql.y:6071
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1196:
+ case 1197:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6083
+//line sql.y:6075
{
yyLOCAL = NewIntLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 1197:
+ case 1198:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6087
+//line sql.y:6079
{
yyLOCAL = yyDollar[1].variableUnion()
}
yyVAL.union = yyLOCAL
- case 1198:
+ case 1199:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6091
+//line sql.y:6083
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
}
yyVAL.union = yyLOCAL
- case 1199:
+ case 1200:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6097
+//line sql.y:6089
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1200:
+ case 1201:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6101
+//line sql.y:6093
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1201:
+ case 1202:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6107
+//line sql.y:6099
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1202:
+ case 1203:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6111
+//line sql.y:6103
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1203:
+ case 1204:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6115
+//line sql.y:6107
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1204:
+ case 1205:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6119
+//line sql.y:6111
{
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1205:
+ case 1206:
yyDollar = yyS[yypt-14 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6123
+//line sql.y:6115
{
// Match type is kept expression as TRIM( ' m ') is accepted
yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1206:
+ case 1207:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6128
+//line sql.y:6120
{
yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1207:
+ case 1208:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6132
+//line sql.y:6124
{
yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), MatchType: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1208:
+ case 1209:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6136
+//line sql.y:6128
{
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1209:
+ case 1210:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6140
+//line sql.y:6132
{
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1210:
+ case 1211:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6144
+//line sql.y:6136
{
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1211:
+ case 1212:
yyDollar = yyS[yypt-14 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6148
+//line sql.y:6140
{
// Match type is kept expression as TRIM( ' m ') is accepted
yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1212:
+ case 1213:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6153
+//line sql.y:6145
{
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1213:
+ case 1214:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6157
+//line sql.y:6149
{
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1214:
+ case 1215:
yyDollar = yyS[yypt-10 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6161
+//line sql.y:6153
{
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1215:
+ case 1216:
yyDollar = yyS[yypt-12 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6165
+//line sql.y:6157
{
// Match type is kept expression as TRIM( ' m ') is accepted
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), MatchType: yyDollar[11].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1216:
+ case 1217:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6172
+//line sql.y:6164
{
yyLOCAL = &ExtractValueExpr{Fragment: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1217:
+ case 1218:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6176
+//line sql.y:6168
{
yyLOCAL = &UpdateXMLExpr{Target: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion(), NewXML: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1218:
+ case 1219:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6182
+//line sql.y:6174
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatBytesType, Argument: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1219:
+ case 1220:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6186
+//line sql.y:6178
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatPicoTimeType, Argument: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1220:
+ case 1221:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6190
+//line sql.y:6182
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsCurrentThreadIDType}
}
yyVAL.union = yyLOCAL
- case 1221:
+ case 1222:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6194
+//line sql.y:6186
{
yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsThreadIDType, Argument: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1222:
+ case 1223:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6200
+//line sql.y:6192
{
yyLOCAL = >IDFuncExpr{Type: GTIDSubsetType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1223:
+ case 1224:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6204
+//line sql.y:6196
{
yyLOCAL = >IDFuncExpr{Type: GTIDSubtractType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1224:
+ case 1225:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6208
+//line sql.y:6200
{
yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1225:
+ case 1226:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6212
+//line sql.y:6204
{
yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1226:
+ case 1227:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6216
+//line sql.y:6208
{
yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1227:
+ case 1228:
yyDollar = yyS[yypt-6 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6220
+//line sql.y:6212
{
yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1228:
+ case 1229:
yyDollar = yyS[yypt-8 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6224
+//line sql.y:6216
{
yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion(), Channel: yyDollar[7].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1229:
+ case 1230:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6229
+//line sql.y:6221
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1230:
+ case 1231:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6233
+//line sql.y:6225
{
yyLOCAL = yyDollar[2].convertTypeUnion()
}
yyVAL.union = yyLOCAL
- case 1231:
+ case 1232:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6239
+//line sql.y:6231
{
}
- case 1232:
+ case 1233:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6241
+//line sql.y:6233
{
yyLOCAL = IntervalDayHour
}
yyVAL.union = yyLOCAL
- case 1233:
+ case 1234:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6245
+//line sql.y:6237
{
yyLOCAL = IntervalDayMicrosecond
}
yyVAL.union = yyLOCAL
- case 1234:
+ case 1235:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6249
+//line sql.y:6241
{
yyLOCAL = IntervalDayMinute
}
yyVAL.union = yyLOCAL
- case 1235:
+ case 1236:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6253
+//line sql.y:6245
{
yyLOCAL = IntervalDaySecond
}
yyVAL.union = yyLOCAL
- case 1236:
+ case 1237:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6257
+//line sql.y:6249
{
yyLOCAL = IntervalHourMicrosecond
}
yyVAL.union = yyLOCAL
- case 1237:
+ case 1238:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6261
+//line sql.y:6253
{
yyLOCAL = IntervalHourMinute
}
yyVAL.union = yyLOCAL
- case 1238:
+ case 1239:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6265
+//line sql.y:6257
{
yyLOCAL = IntervalHourSecond
}
yyVAL.union = yyLOCAL
- case 1239:
+ case 1240:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6269
+//line sql.y:6261
{
yyLOCAL = IntervalMinuteMicrosecond
}
yyVAL.union = yyLOCAL
- case 1240:
+ case 1241:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6273
+//line sql.y:6265
{
yyLOCAL = IntervalMinuteSecond
}
yyVAL.union = yyLOCAL
- case 1241:
+ case 1242:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6277
+//line sql.y:6269
{
yyLOCAL = IntervalSecondMicrosecond
}
yyVAL.union = yyLOCAL
- case 1242:
+ case 1243:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6281
+//line sql.y:6273
{
yyLOCAL = IntervalYearMonth
}
yyVAL.union = yyLOCAL
- case 1243:
+ case 1244:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6287
+//line sql.y:6279
{
yyLOCAL = IntervalDay
}
yyVAL.union = yyLOCAL
- case 1244:
+ case 1245:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6291
+//line sql.y:6283
{
yyLOCAL = IntervalWeek
}
yyVAL.union = yyLOCAL
- case 1245:
+ case 1246:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6295
+//line sql.y:6287
{
yyLOCAL = IntervalHour
}
yyVAL.union = yyLOCAL
- case 1246:
+ case 1247:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6299
+//line sql.y:6291
{
yyLOCAL = IntervalMinute
}
yyVAL.union = yyLOCAL
- case 1247:
+ case 1248:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6303
+//line sql.y:6295
{
yyLOCAL = IntervalMonth
}
yyVAL.union = yyLOCAL
- case 1248:
+ case 1249:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6307
+//line sql.y:6299
{
yyLOCAL = IntervalQuarter
}
yyVAL.union = yyLOCAL
- case 1249:
+ case 1250:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6311
+//line sql.y:6303
{
yyLOCAL = IntervalSecond
}
yyVAL.union = yyLOCAL
- case 1250:
+ case 1251:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6315
+//line sql.y:6307
{
yyLOCAL = IntervalMicrosecond
}
yyVAL.union = yyLOCAL
- case 1251:
+ case 1252:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL IntervalTypes
-//line sql.y:6319
+//line sql.y:6311
{
yyLOCAL = IntervalYear
}
yyVAL.union = yyLOCAL
- case 1254:
+ case 1255:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6329
+//line sql.y:6321
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1255:
+ case 1256:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6333
+//line sql.y:6325
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1256:
+ case 1257:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6337
+//line sql.y:6329
{
yyLOCAL = NewIntLiteral(yyDollar[2].str)
}
yyVAL.union = yyLOCAL
- case 1257:
+ case 1258:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6341
+//line sql.y:6333
{
yyLOCAL = NewArgument(yyDollar[2].str[1:])
bindVariable(yylex, yyDollar[2].str[1:])
}
yyVAL.union = yyLOCAL
- case 1258:
+ case 1259:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6352
+//line sql.y:6344
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1259:
+ case 1260:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6356
+//line sql.y:6348
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1260:
+ case 1261:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6360
+//line sql.y:6352
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1261:
+ case 1262:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6364
+//line sql.y:6356
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1262:
+ case 1263:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6368
+//line sql.y:6360
{
yyLOCAL = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: yyDollar[3].selectExprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1263:
+ case 1264:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6374
+//line sql.y:6366
{
yyLOCAL = NoOption
}
yyVAL.union = yyLOCAL
- case 1264:
+ case 1265:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6378
+//line sql.y:6370
{
yyLOCAL = BooleanModeOpt
}
yyVAL.union = yyLOCAL
- case 1265:
+ case 1266:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6382
+//line sql.y:6374
{
yyLOCAL = NaturalLanguageModeOpt
}
yyVAL.union = yyLOCAL
- case 1266:
+ case 1267:
yyDollar = yyS[yypt-7 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6386
+//line sql.y:6378
{
yyLOCAL = NaturalLanguageModeWithQueryExpansionOpt
}
yyVAL.union = yyLOCAL
- case 1267:
+ case 1268:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL MatchExprOption
-//line sql.y:6390
+//line sql.y:6382
{
yyLOCAL = QueryExpansionOpt
}
yyVAL.union = yyLOCAL
- case 1268:
+ case 1269:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6396
+//line sql.y:6388
{
yyVAL.str = string(yyDollar[1].identifierCI.String())
}
- case 1269:
+ case 1270:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6400
+//line sql.y:6392
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1270:
+ case 1271:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6404
+//line sql.y:6396
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1271:
+ case 1272:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6410
+//line sql.y:6402
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1272:
+ case 1273:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6414
+//line sql.y:6406
{
yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 1273:
+ case 1274:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6418
+//line sql.y:6410
{
yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)}
}
yyVAL.union = yyLOCAL
- case 1274:
+ case 1275:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6424
+//line sql.y:6416
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1275:
+ case 1276:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6428
+//line sql.y:6420
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset}
}
yyVAL.union = yyLOCAL
- case 1276:
+ case 1277:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6432
+//line sql.y:6424
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1277:
+ case 1278:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6436
+//line sql.y:6428
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1278:
+ case 1279:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6440
+//line sql.y:6432
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
yyLOCAL.Length = yyDollar[2].LengthScaleOption.Length
yyLOCAL.Scale = yyDollar[2].LengthScaleOption.Scale
}
yyVAL.union = yyLOCAL
- case 1279:
+ case 1280:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6446
+//line sql.y:6438
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1280:
+ case 1281:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6450
+//line sql.y:6442
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1281:
+ case 1282:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6454
+//line sql.y:6446
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1282:
+ case 1283:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6458
+//line sql.y:6450
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1283:
+ case 1284:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6462
+//line sql.y:6454
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1284:
+ case 1285:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6466
+//line sql.y:6458
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1285:
+ case 1286:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6470
+//line sql.y:6462
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1286:
+ case 1287:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6474
+//line sql.y:6466
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()}
}
yyVAL.union = yyLOCAL
- case 1287:
+ case 1288:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6478
+//line sql.y:6470
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1288:
+ case 1289:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ConvertType
-//line sql.y:6482
+//line sql.y:6474
{
yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1289:
+ case 1290:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:6488
+//line sql.y:6480
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1290:
+ case 1291:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:6492
+//line sql.y:6484
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1291:
+ case 1292:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6497
+//line sql.y:6489
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1292:
+ case 1293:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6501
+//line sql.y:6493
{
yyLOCAL = yyDollar[1].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1293:
+ case 1294:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6506
+//line sql.y:6498
{
yyVAL.str = string("")
}
- case 1294:
+ case 1295:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6510
+//line sql.y:6502
{
yyVAL.str = " separator " + encodeSQLString(yyDollar[2].str)
}
- case 1295:
+ case 1296:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*When
-//line sql.y:6516
+//line sql.y:6508
{
yyLOCAL = []*When{yyDollar[1].whenUnion()}
}
yyVAL.union = yyLOCAL
- case 1296:
+ case 1297:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6520
+//line sql.y:6512
{
yySLICE := (*[]*When)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[2].whenUnion())
}
- case 1297:
+ case 1298:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *When
-//line sql.y:6526
+//line sql.y:6518
{
yyLOCAL = &When{Cond: yyDollar[2].exprUnion(), Val: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1298:
+ case 1299:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6531
+//line sql.y:6523
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1299:
+ case 1300:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6535
+//line sql.y:6527
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1300:
+ case 1301:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6541
+//line sql.y:6533
{
yyLOCAL = &ColName{Name: yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1301:
+ case 1302:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6545
+//line sql.y:6537
{
yyLOCAL = &ColName{Name: NewIdentifierCI(string(yyDollar[1].str))}
}
yyVAL.union = yyLOCAL
- case 1302:
+ case 1303:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6549
+//line sql.y:6541
{
yyLOCAL = &ColName{Qualifier: TableName{Name: yyDollar[1].identifierCS}, Name: yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1303:
+ case 1304:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *ColName
-//line sql.y:6553
+//line sql.y:6545
{
yyLOCAL = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}, Name: yyDollar[5].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1304:
+ case 1305:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6559
+//line sql.y:6551
{
yyLOCAL = yyDollar[1].colNameUnion()
}
yyVAL.union = yyLOCAL
- case 1305:
+ case 1306:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6563
+//line sql.y:6555
{
yyLOCAL = &Offset{V: convertStringToInt(yyDollar[1].str)}
}
yyVAL.union = yyLOCAL
- case 1306:
+ case 1307:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6569
+//line sql.y:6561
{
// TODO(sougou): Deprecate this construct.
if yyDollar[1].identifierCI.Lowered() != "value" {
@@ -18664,427 +18591,427 @@ yydefault:
yyLOCAL = NewIntLiteral("1")
}
yyVAL.union = yyLOCAL
- case 1307:
+ case 1308:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6578
+//line sql.y:6570
{
yyLOCAL = NewIntLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 1308:
+ case 1309:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6582
+//line sql.y:6574
{
yyLOCAL = NewArgument(yyDollar[1].str[1:])
bindVariable(yylex, yyDollar[1].str[1:])
}
yyVAL.union = yyLOCAL
- case 1309:
+ case 1310:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:6588
+//line sql.y:6580
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1310:
+ case 1311:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:6592
+//line sql.y:6584
{
yyLOCAL = yyDollar[3].exprsUnion()
}
yyVAL.union = yyLOCAL
- case 1311:
+ case 1312:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6597
+//line sql.y:6589
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1312:
+ case 1313:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Expr
-//line sql.y:6601
+//line sql.y:6593
{
yyLOCAL = yyDollar[2].exprUnion()
}
yyVAL.union = yyLOCAL
- case 1313:
+ case 1314:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *NamedWindow
-//line sql.y:6607
+//line sql.y:6599
{
yyLOCAL = &NamedWindow{yyDollar[2].windowDefinitionsUnion()}
}
yyVAL.union = yyLOCAL
- case 1314:
+ case 1315:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL NamedWindows
-//line sql.y:6613
+//line sql.y:6605
{
yyLOCAL = NamedWindows{yyDollar[1].namedWindowUnion()}
}
yyVAL.union = yyLOCAL
- case 1315:
+ case 1316:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6617
+//line sql.y:6609
{
yySLICE := (*NamedWindows)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].namedWindowUnion())
}
- case 1316:
+ case 1317:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL NamedWindows
-//line sql.y:6622
+//line sql.y:6614
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1317:
+ case 1318:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL NamedWindows
-//line sql.y:6626
+//line sql.y:6618
{
yyLOCAL = yyDollar[1].namedWindowsUnion()
}
yyVAL.union = yyLOCAL
- case 1318:
+ case 1319:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6631
+//line sql.y:6623
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1319:
+ case 1320:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6635
+//line sql.y:6627
{
yyLOCAL = yyDollar[1].orderByUnion()
}
yyVAL.union = yyLOCAL
- case 1320:
+ case 1321:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6641
+//line sql.y:6633
{
yyLOCAL = yyDollar[3].orderByUnion()
}
yyVAL.union = yyLOCAL
- case 1321:
+ case 1322:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderBy
-//line sql.y:6647
+//line sql.y:6639
{
yyLOCAL = OrderBy{yyDollar[1].orderUnion()}
}
yyVAL.union = yyLOCAL
- case 1322:
+ case 1323:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6651
+//line sql.y:6643
{
yySLICE := (*OrderBy)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].orderUnion())
}
- case 1323:
+ case 1324:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Order
-//line sql.y:6657
+//line sql.y:6649
{
yyLOCAL = &Order{Expr: yyDollar[1].exprUnion(), Direction: yyDollar[2].orderDirectionUnion()}
}
yyVAL.union = yyLOCAL
- case 1324:
+ case 1325:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL OrderDirection
-//line sql.y:6662
+//line sql.y:6654
{
yyLOCAL = AscOrder
}
yyVAL.union = yyLOCAL
- case 1325:
+ case 1326:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderDirection
-//line sql.y:6666
+//line sql.y:6658
{
yyLOCAL = AscOrder
}
yyVAL.union = yyLOCAL
- case 1326:
+ case 1327:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL OrderDirection
-//line sql.y:6670
+//line sql.y:6662
{
yyLOCAL = DescOrder
}
yyVAL.union = yyLOCAL
- case 1327:
+ case 1328:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6675
+//line sql.y:6667
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1328:
+ case 1329:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6679
+//line sql.y:6671
{
yyLOCAL = yyDollar[1].limitUnion()
}
yyVAL.union = yyLOCAL
- case 1329:
+ case 1330:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6685
+//line sql.y:6677
{
yyLOCAL = &Limit{Rowcount: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1330:
+ case 1331:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6689
+//line sql.y:6681
{
yyLOCAL = &Limit{Offset: yyDollar[2].exprUnion(), Rowcount: yyDollar[4].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1331:
+ case 1332:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Limit
-//line sql.y:6693
+//line sql.y:6685
{
yyLOCAL = &Limit{Offset: yyDollar[4].exprUnion(), Rowcount: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1332:
+ case 1333:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6698
+//line sql.y:6690
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1333:
+ case 1334:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6702
+//line sql.y:6694
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1334:
+ case 1335:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6706
+//line sql.y:6698
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1335:
+ case 1336:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6710
+//line sql.y:6702
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1336:
+ case 1337:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []AlterOption
-//line sql.y:6714
+//line sql.y:6706
{
yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1337:
+ case 1338:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6721
+//line sql.y:6713
{
yyLOCAL = &LockOption{Type: DefaultType}
}
yyVAL.union = yyLOCAL
- case 1338:
+ case 1339:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6725
+//line sql.y:6717
{
yyLOCAL = &LockOption{Type: NoneType}
}
yyVAL.union = yyLOCAL
- case 1339:
+ case 1340:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6729
+//line sql.y:6721
{
yyLOCAL = &LockOption{Type: SharedType}
}
yyVAL.union = yyLOCAL
- case 1340:
+ case 1341:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6733
+//line sql.y:6725
{
yyLOCAL = &LockOption{Type: ExclusiveType}
}
yyVAL.union = yyLOCAL
- case 1341:
+ case 1342:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6739
+//line sql.y:6731
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1342:
+ case 1343:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6743
+//line sql.y:6735
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1343:
+ case 1344:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6747
+//line sql.y:6739
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1344:
+ case 1345:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL AlterOption
-//line sql.y:6751
+//line sql.y:6743
{
yyLOCAL = AlgorithmValue(yyDollar[3].str)
}
yyVAL.union = yyLOCAL
- case 1345:
+ case 1346:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6756
+//line sql.y:6748
{
yyVAL.str = ""
}
- case 1346:
+ case 1347:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6760
+//line sql.y:6752
{
yyVAL.str = string(yyDollar[3].str)
}
- case 1347:
+ case 1348:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6764
+//line sql.y:6756
{
yyVAL.str = string(yyDollar[3].str)
}
- case 1348:
+ case 1349:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6768
+//line sql.y:6760
{
yyVAL.str = string(yyDollar[3].str)
}
- case 1349:
+ case 1350:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6773
+//line sql.y:6765
{
yyVAL.str = ""
}
- case 1350:
+ case 1351:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6777
+//line sql.y:6769
{
yyVAL.str = yyDollar[3].str
}
- case 1351:
+ case 1352:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6783
+//line sql.y:6775
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1352:
+ case 1353:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6787
+//line sql.y:6779
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1353:
+ case 1354:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6792
+//line sql.y:6784
{
yyVAL.str = ""
}
- case 1354:
+ case 1355:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:6796
+//line sql.y:6788
{
yyVAL.str = yyDollar[2].str
}
- case 1355:
+ case 1356:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6801
+//line sql.y:6793
{
yyVAL.str = "cascaded"
}
- case 1356:
+ case 1357:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6805
+//line sql.y:6797
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1357:
+ case 1358:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6809
+//line sql.y:6801
{
yyVAL.str = string(yyDollar[1].str)
}
- case 1358:
+ case 1359:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6814
+//line sql.y:6806
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1359:
+ case 1360:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6818
+//line sql.y:6810
{
yyLOCAL = yyDollar[3].definerUnion()
}
yyVAL.union = yyLOCAL
- case 1360:
+ case 1361:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6824
+//line sql.y:6816
{
yyLOCAL = &Definer{
Name: string(yyDollar[1].str),
}
}
yyVAL.union = yyLOCAL
- case 1361:
+ case 1362:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6830
+//line sql.y:6822
{
yyLOCAL = &Definer{
Name: string(yyDollar[1].str),
}
}
yyVAL.union = yyLOCAL
- case 1362:
+ case 1363:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Definer
-//line sql.y:6836
+//line sql.y:6828
{
yyLOCAL = &Definer{
Name: yyDollar[1].str,
@@ -19092,369 +19019,369 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 1363:
+ case 1364:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6845
+//line sql.y:6837
{
yyVAL.str = encodeSQLString(yyDollar[1].str)
}
- case 1364:
+ case 1365:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6849
+//line sql.y:6841
{
yyVAL.str = formatIdentifier(yyDollar[1].str)
}
- case 1365:
+ case 1366:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6854
+//line sql.y:6846
{
yyVAL.str = ""
}
- case 1366:
+ case 1367:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6858
+//line sql.y:6850
{
yyVAL.str = formatAddress(yyDollar[1].str)
}
- case 1367:
+ case 1368:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL Lock
-//line sql.y:6864
+//line sql.y:6856
{
yyLOCAL = ForUpdateLock
}
yyVAL.union = yyLOCAL
- case 1368:
+ case 1369:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL Lock
-//line sql.y:6868
+//line sql.y:6860
{
yyLOCAL = ShareModeLock
}
yyVAL.union = yyLOCAL
- case 1369:
+ case 1370:
yyDollar = yyS[yypt-9 : yypt+1]
var yyLOCAL *SelectInto
-//line sql.y:6874
+//line sql.y:6866
{
yyLOCAL = &SelectInto{Type: IntoOutfileS3, FileName: encodeSQLString(yyDollar[4].str), Charset: yyDollar[5].columnCharset, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, Overwrite: yyDollar[9].str}
}
yyVAL.union = yyLOCAL
- case 1370:
+ case 1371:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SelectInto
-//line sql.y:6878
+//line sql.y:6870
{
yyLOCAL = &SelectInto{Type: IntoDumpfile, FileName: encodeSQLString(yyDollar[3].str), Charset: ColumnCharset{}, FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""}
}
yyVAL.union = yyLOCAL
- case 1371:
+ case 1372:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *SelectInto
-//line sql.y:6882
+//line sql.y:6874
{
yyLOCAL = &SelectInto{Type: IntoOutfile, FileName: encodeSQLString(yyDollar[3].str), Charset: yyDollar[4].columnCharset, FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""}
}
yyVAL.union = yyLOCAL
- case 1372:
+ case 1373:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6887
+//line sql.y:6879
{
yyVAL.str = ""
}
- case 1373:
+ case 1374:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6891
+//line sql.y:6883
{
yyVAL.str = " format csv" + yyDollar[3].str
}
- case 1374:
+ case 1375:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6895
+//line sql.y:6887
{
yyVAL.str = " format text" + yyDollar[3].str
}
- case 1375:
+ case 1376:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6900
+//line sql.y:6892
{
yyVAL.str = ""
}
- case 1376:
+ case 1377:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6904
+//line sql.y:6896
{
yyVAL.str = " header"
}
- case 1377:
+ case 1378:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6909
+//line sql.y:6901
{
yyVAL.str = ""
}
- case 1378:
+ case 1379:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6913
+//line sql.y:6905
{
yyVAL.str = " manifest on"
}
- case 1379:
+ case 1380:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6917
+//line sql.y:6909
{
yyVAL.str = " manifest off"
}
- case 1380:
+ case 1381:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6922
+//line sql.y:6914
{
yyVAL.str = ""
}
- case 1381:
+ case 1382:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6926
+//line sql.y:6918
{
yyVAL.str = " overwrite on"
}
- case 1382:
+ case 1383:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6930
+//line sql.y:6922
{
yyVAL.str = " overwrite off"
}
- case 1383:
+ case 1384:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6936
+//line sql.y:6928
{
yyVAL.str = yyDollar[1].str + yyDollar[2].str
}
- case 1384:
+ case 1385:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6941
+//line sql.y:6933
{
yyVAL.str = ""
}
- case 1385:
+ case 1386:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6945
+//line sql.y:6937
{
yyVAL.str = " lines" + yyDollar[2].str
}
- case 1386:
+ case 1387:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6951
+//line sql.y:6943
{
yyVAL.str = yyDollar[1].str
}
- case 1387:
+ case 1388:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6955
+//line sql.y:6947
{
yyVAL.str = yyDollar[1].str + yyDollar[2].str
}
- case 1388:
+ case 1389:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6961
+//line sql.y:6953
{
yyVAL.str = " starting by " + encodeSQLString(yyDollar[3].str)
}
- case 1389:
+ case 1390:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6965
+//line sql.y:6957
{
yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str)
}
- case 1390:
+ case 1391:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:6970
+//line sql.y:6962
{
yyVAL.str = ""
}
- case 1391:
+ case 1392:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6974
+//line sql.y:6966
{
yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str
}
- case 1392:
+ case 1393:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:6980
+//line sql.y:6972
{
yyVAL.str = yyDollar[1].str
}
- case 1393:
+ case 1394:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:6984
+//line sql.y:6976
{
yyVAL.str = yyDollar[1].str + yyDollar[2].str
}
- case 1394:
+ case 1395:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6990
+//line sql.y:6982
{
yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str)
}
- case 1395:
+ case 1396:
yyDollar = yyS[yypt-4 : yypt+1]
-//line sql.y:6994
+//line sql.y:6986
{
yyVAL.str = yyDollar[1].str + " enclosed by " + encodeSQLString(yyDollar[4].str)
}
- case 1396:
+ case 1397:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:6998
+//line sql.y:6990
{
yyVAL.str = " escaped by " + encodeSQLString(yyDollar[3].str)
}
- case 1397:
+ case 1398:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7003
+//line sql.y:6995
{
yyVAL.str = ""
}
- case 1398:
+ case 1399:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7007
+//line sql.y:6999
{
yyVAL.str = " optionally"
}
- case 1399:
+ case 1400:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7020
+//line sql.y:7012
{
yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion()}
}
yyVAL.union = yyLOCAL
- case 1400:
+ case 1401:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7024
+//line sql.y:7016
{
yyLOCAL = &Insert{Rows: yyDollar[1].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 1401:
+ case 1402:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7028
+//line sql.y:7020
{
yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion()}
}
yyVAL.union = yyLOCAL
- case 1402:
+ case 1403:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7032
+//line sql.y:7024
{
yyLOCAL = &Insert{Columns: []IdentifierCI{}, Rows: yyDollar[4].valuesUnion()}
}
yyVAL.union = yyLOCAL
- case 1403:
+ case 1404:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL *Insert
-//line sql.y:7036
+//line sql.y:7028
{
yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[4].selStmtUnion()}
}
yyVAL.union = yyLOCAL
- case 1404:
+ case 1405:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Columns
-//line sql.y:7042
+//line sql.y:7034
{
yyLOCAL = Columns{yyDollar[1].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1405:
+ case 1406:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL Columns
-//line sql.y:7046
+//line sql.y:7038
{
yyLOCAL = Columns{yyDollar[3].identifierCI}
}
yyVAL.union = yyLOCAL
- case 1406:
+ case 1407:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7050
+//line sql.y:7042
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].identifierCI)
}
- case 1407:
+ case 1408:
yyDollar = yyS[yypt-5 : yypt+1]
-//line sql.y:7054
+//line sql.y:7046
{
yySLICE := (*Columns)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[5].identifierCI)
}
- case 1408:
+ case 1409:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL UpdateExprs
-//line sql.y:7059
+//line sql.y:7051
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1409:
+ case 1410:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL UpdateExprs
-//line sql.y:7063
+//line sql.y:7055
{
yyLOCAL = yyDollar[5].updateExprsUnion()
}
yyVAL.union = yyLOCAL
- case 1410:
+ case 1411:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Values
-//line sql.y:7069
+//line sql.y:7061
{
yyLOCAL = Values{yyDollar[1].valTupleUnion()}
}
yyVAL.union = yyLOCAL
- case 1411:
+ case 1412:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7073
+//line sql.y:7065
{
yySLICE := (*Values)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].valTupleUnion())
}
- case 1412:
+ case 1413:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7079
+//line sql.y:7071
{
yyLOCAL = yyDollar[1].valTupleUnion()
}
yyVAL.union = yyLOCAL
- case 1413:
+ case 1414:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7083
+//line sql.y:7075
{
yyLOCAL = ValTuple{}
}
yyVAL.union = yyLOCAL
- case 1414:
+ case 1415:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7089
+//line sql.y:7081
{
yyLOCAL = ValTuple(yyDollar[2].exprsUnion())
}
yyVAL.union = yyLOCAL
- case 1415:
+ case 1416:
yyDollar = yyS[yypt-4 : yypt+1]
var yyLOCAL ValTuple
-//line sql.y:7093
+//line sql.y:7085
{
yyLOCAL = ValTuple(yyDollar[3].exprsUnion())
}
yyVAL.union = yyLOCAL
- case 1416:
+ case 1417:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7098
+//line sql.y:7090
{
if len(yyDollar[1].valTupleUnion()) == 1 {
yyLOCAL = yyDollar[1].valTupleUnion()[0]
@@ -19463,344 +19390,339 @@ yydefault:
}
}
yyVAL.union = yyLOCAL
- case 1417:
+ case 1418:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL UpdateExprs
-//line sql.y:7108
+//line sql.y:7100
{
yyLOCAL = UpdateExprs{yyDollar[1].updateExprUnion()}
}
yyVAL.union = yyLOCAL
- case 1418:
+ case 1419:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7112
+//line sql.y:7104
{
yySLICE := (*UpdateExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].updateExprUnion())
}
- case 1419:
+ case 1420:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *UpdateExpr
-//line sql.y:7118
+//line sql.y:7110
{
yyLOCAL = &UpdateExpr{Name: yyDollar[1].colNameUnion(), Expr: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1420:
+ case 1421:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL SetExprs
-//line sql.y:7124
+//line sql.y:7116
{
yyLOCAL = SetExprs{yyDollar[1].setExprUnion()}
}
yyVAL.union = yyLOCAL
- case 1421:
+ case 1422:
yyDollar = yyS[yypt-3 : yypt+1]
-//line sql.y:7128
+//line sql.y:7120
{
yySLICE := (*SetExprs)(yyIaddr(yyVAL.union))
*yySLICE = append(*yySLICE, yyDollar[3].setExprUnion())
}
- case 1422:
+ case 1423:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SetExpr
-//line sql.y:7134
+//line sql.y:7126
{
yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("on")}
}
yyVAL.union = yyLOCAL
- case 1423:
+ case 1424:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SetExpr
-//line sql.y:7138
+//line sql.y:7130
{
yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("off")}
}
yyVAL.union = yyLOCAL
- case 1424:
+ case 1425:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SetExpr
-//line sql.y:7142
+//line sql.y:7134
{
yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: yyDollar[3].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1425:
+ case 1426:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL *SetExpr
-//line sql.y:7146
+//line sql.y:7138
{
yyLOCAL = &SetExpr{Var: NewSetVariable(string(yyDollar[1].str), SessionScope), Expr: yyDollar[2].exprUnion()}
}
yyVAL.union = yyLOCAL
- case 1426:
+ case 1427:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:7152
+//line sql.y:7144
{
yyLOCAL = NewSetVariable(string(yyDollar[1].str), SessionScope)
}
yyVAL.union = yyLOCAL
- case 1427:
+ case 1428:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:7156
+//line sql.y:7148
{
yyLOCAL = yyDollar[1].variableUnion()
}
yyVAL.union = yyLOCAL
- case 1428:
+ case 1429:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *Variable
-//line sql.y:7160
+//line sql.y:7152
{
yyLOCAL = NewSetVariable(string(yyDollar[2].str), yyDollar[1].scopeUnion())
}
yyVAL.union = yyLOCAL
- case 1430:
+ case 1431:
yyDollar = yyS[yypt-2 : yypt+1]
-//line sql.y:7167
+//line sql.y:7159
{
yyVAL.str = "charset"
}
- case 1433:
+ case 1434:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7177
+//line sql.y:7169
{
yyLOCAL = NewStrLiteral(yyDollar[1].identifierCI.String())
}
yyVAL.union = yyLOCAL
- case 1434:
+ case 1435:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7181
+//line sql.y:7173
{
yyLOCAL = NewStrLiteral(yyDollar[1].str)
}
yyVAL.union = yyLOCAL
- case 1435:
+ case 1436:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Expr
-//line sql.y:7185
+//line sql.y:7177
{
yyLOCAL = &Default{}
}
yyVAL.union = yyLOCAL
- case 1438:
+ case 1439:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:7194
+//line sql.y:7186
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1439:
+ case 1440:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL bool
-//line sql.y:7196
+//line sql.y:7188
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1440:
+ case 1441:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:7199
+//line sql.y:7191
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1441:
+ case 1442:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL bool
-//line sql.y:7201
+//line sql.y:7193
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1442:
+ case 1443:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL bool
-//line sql.y:7204
+//line sql.y:7196
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1443:
+ case 1444:
yyDollar = yyS[yypt-3 : yypt+1]
var yyLOCAL bool
-//line sql.y:7206
+//line sql.y:7198
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1444:
+ case 1445:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Ignore
-//line sql.y:7209
+//line sql.y:7201
{
yyLOCAL = false
}
yyVAL.union = yyLOCAL
- case 1445:
+ case 1446:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Ignore
-//line sql.y:7211
+//line sql.y:7203
{
yyLOCAL = true
}
yyVAL.union = yyLOCAL
- case 1446:
+ case 1447:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7214
+//line sql.y:7206
{
yyVAL.empty = struct{}{}
}
- case 1447:
+ case 1448:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7216
+//line sql.y:7208
{
yyVAL.empty = struct{}{}
}
- case 1448:
+ case 1449:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7218
+//line sql.y:7210
{
yyVAL.empty = struct{}{}
}
- case 1449:
+ case 1450:
yyDollar = yyS[yypt-5 : yypt+1]
var yyLOCAL Statement
-//line sql.y:7222
+//line sql.y:7214
{
yyLOCAL = &CallProc{Name: yyDollar[2].tableName, Params: yyDollar[4].exprsUnion()}
}
yyVAL.union = yyLOCAL
- case 1450:
+ case 1451:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:7227
+//line sql.y:7219
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1451:
+ case 1452:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL Exprs
-//line sql.y:7231
+//line sql.y:7223
{
yyLOCAL = yyDollar[1].exprsUnion()
}
yyVAL.union = yyLOCAL
- case 1452:
+ case 1453:
yyDollar = yyS[yypt-0 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:7236
+//line sql.y:7228
{
yyLOCAL = nil
}
yyVAL.union = yyLOCAL
- case 1453:
+ case 1454:
yyDollar = yyS[yypt-1 : yypt+1]
var yyLOCAL []*IndexOption
-//line sql.y:7238
+//line sql.y:7230
{
yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()}
}
yyVAL.union = yyLOCAL
- case 1454:
+ case 1455:
yyDollar = yyS[yypt-2 : yypt+1]
var yyLOCAL *IndexOption
-//line sql.y:7242
+//line sql.y:7234
{
yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), String: string(yyDollar[2].identifierCI.String())}
}
yyVAL.union = yyLOCAL
- case 1455:
+ case 1456:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7248
+//line sql.y:7240
{
yyVAL.identifierCI = yyDollar[1].identifierCI
}
- case 1456:
+ case 1457:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7252
+//line sql.y:7244
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 1458:
+ case 1459:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7259
+//line sql.y:7251
{
yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str))
}
- case 1459:
+ case 1460:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7265
+//line sql.y:7257
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 1460:
+ case 1461:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7269
+//line sql.y:7261
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 1461:
+ case 1462:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7275
+//line sql.y:7267
{
yyVAL.identifierCS = NewIdentifierCS("")
}
- case 1462:
+ case 1463:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7279
+//line sql.y:7271
{
yyVAL.identifierCS = yyDollar[1].identifierCS
}
- case 1464:
+ case 1465:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7286
+//line sql.y:7278
{
yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str))
}
- case 2006:
+ case 2007:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7854
+//line sql.y:7846
{
- if incNesting(yylex) {
- yylex.Error("max nesting level reached")
- return 1
- }
}
- case 2007:
+ case 2008:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7863
+//line sql.y:7851
{
- decNesting(yylex)
}
- case 2008:
+ case 2009:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7868
+//line sql.y:7855
{
skipToEnd(yylex)
}
- case 2009:
+ case 2010:
yyDollar = yyS[yypt-0 : yypt+1]
-//line sql.y:7873
+//line sql.y:7860
{
skipToEnd(yylex)
}
- case 2010:
+ case 2011:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7877
+//line sql.y:7864
{
skipToEnd(yylex)
}
- case 2011:
+ case 2012:
yyDollar = yyS[yypt-1 : yypt+1]
-//line sql.y:7881
+//line sql.y:7868
{
skipToEnd(yylex)
}
diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y
index 5c2e7c397e1..345f3a0582d 100644
--- a/go/vt/sqlparser/sql.y
+++ b/go/vt/sqlparser/sql.y
@@ -28,18 +28,6 @@ func setDDL(yylex yyLexer, node Statement) {
yylex.(*Tokenizer).partialDDL = node
}
-func incNesting(yylex yyLexer) bool {
- yylex.(*Tokenizer).nesting++
- if yylex.(*Tokenizer).nesting == 200 {
- return true
- }
- return false
-}
-
-func decNesting(yylex yyLexer) {
- yylex.(*Tokenizer).nesting--
-}
-
// skipToEnd forces the lexer to end prematurely. Not all SQL statements
// are supported by the Parser, thus calling skipToEnd will make the lexer
// return EOF early.
@@ -2788,7 +2776,11 @@ insert_method_options:
| LAST
table_opt_value:
- reserved_sql_id
+ table_id '.' reserved_table_id
+ {
+ $$ = String(TableName{Qualifier: $1, Name: $3})
+ }
+| reserved_sql_id
{
$$ = $1.String()
}
@@ -5336,7 +5328,7 @@ function_call_keyword
}
interval_value:
- INTERVAL simple_expr sql_id
+ INTERVAL bit_expr sql_id
{
$$ = &IntervalExpr{Expr: $2, Unit: $3.String()}
}
@@ -7852,16 +7844,11 @@ non_reserved_keyword:
openb:
'('
{
- if incNesting(yylex) {
- yylex.Error("max nesting level reached")
- return 1
- }
}
closeb:
')'
{
- decNesting(yylex)
}
skip_to_end:
diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go
index c3326747b6d..35f7ccf5390 100644
--- a/go/vt/sqlparser/token.go
+++ b/go/vt/sqlparser/token.go
@@ -41,7 +41,6 @@ type Tokenizer struct {
lastToken string
posVarIndex int
partialDDL Statement
- nesting int
multi bool
specialComment *Tokenizer
@@ -699,7 +698,6 @@ func (tkn *Tokenizer) reset() {
tkn.partialDDL = nil
tkn.specialComment = nil
tkn.posVarIndex = 0
- tkn.nesting = 0
tkn.SkipToEnd = false
}
diff --git a/go/vt/srvtopo/discover.go b/go/vt/srvtopo/discover.go
index 91aaea9daf6..2997dc42e21 100644
--- a/go/vt/srvtopo/discover.go
+++ b/go/vt/srvtopo/discover.go
@@ -29,20 +29,23 @@ import (
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
-// FindAllTargets goes through all serving shards in the topology
-// for the provided tablet types. It returns one Target object per
-// keyspace / shard / matching TabletType.
-func FindAllTargets(ctx context.Context, ts Server, cell string, tabletTypes []topodatapb.TabletType) ([]*querypb.Target, error) {
- ksNames, err := ts.GetSrvKeyspaceNames(ctx, cell, true)
- if err != nil {
- return nil, err
+// FindAllTargets goes through all serving shards in the topology for the provided keyspaces
+// and tablet types. If no keyspaces are provided all available keyspaces in the topo are
+// fetched. It returns one Target object per keyspace/shard/matching TabletType.
+func FindAllTargets(ctx context.Context, ts Server, cell string, keyspaces []string, tabletTypes []topodatapb.TabletType) ([]*querypb.Target, error) {
+ var err error
+ if len(keyspaces) == 0 {
+ keyspaces, err = ts.GetSrvKeyspaceNames(ctx, cell, true)
+ if err != nil {
+ return nil, err
+ }
}
var targets []*querypb.Target
var wg sync.WaitGroup
var mu sync.Mutex
var errRecorder concurrency.AllErrorRecorder
- for _, ksName := range ksNames {
+ for _, ksName := range keyspaces {
wg.Add(1)
go func(keyspace string) {
defer wg.Done()
diff --git a/go/vt/srvtopo/discover_test.go b/go/vt/srvtopo/discover_test.go
index c076ba0e7b7..503f98ace1e 100644
--- a/go/vt/srvtopo/discover_test.go
+++ b/go/vt/srvtopo/discover_test.go
@@ -18,11 +18,12 @@ package srvtopo
import (
"context"
- "reflect"
"sort"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
"vitess.io/vitess/go/vt/topo/memorytopo"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -61,16 +62,12 @@ func TestFindAllTargets(t *testing.T) {
rs := NewResilientServer(ts, "TestFindAllKeyspaceShards")
// No keyspace / shards.
- ks, err := FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY})
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if len(ks) > 0 {
- t.Errorf("why did I get anything? %v", ks)
- }
+ ks, err := FindAllTargets(ctx, rs, "cell1", []string{"test_keyspace"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY})
+ assert.NoError(t, err)
+ assert.Len(t, ks, 0)
// Add one.
- if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace", &topodatapb.SrvKeyspace{
+ assert.NoError(t, ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace", &topodatapb.SrvKeyspace{
Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
{
ServedType: topodatapb.TabletType_PRIMARY,
@@ -81,28 +78,34 @@ func TestFindAllTargets(t *testing.T) {
},
},
},
- }); err != nil {
- t.Fatalf("can't add srvKeyspace: %v", err)
- }
+ }))
// Get it.
- ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY})
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !reflect.DeepEqual(ks, []*querypb.Target{
+ ks, err = FindAllTargets(ctx, rs, "cell1", []string{"test_keyspace"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY})
+ assert.NoError(t, err)
+ assert.EqualValues(t, []*querypb.Target{
{
Cell: "cell1",
Keyspace: "test_keyspace",
Shard: "test_shard0",
TabletType: topodatapb.TabletType_PRIMARY,
},
- }) {
- t.Errorf("got wrong value: %v", ks)
- }
+ }, ks)
+
+ // Get any keyspace.
+ ks, err = FindAllTargets(ctx, rs, "cell1", nil, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY})
+ assert.NoError(t, err)
+ assert.EqualValues(t, []*querypb.Target{
+ {
+ Cell: "cell1",
+ Keyspace: "test_keyspace",
+ Shard: "test_shard0",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ }, ks)
// Add another one.
- if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace2", &topodatapb.SrvKeyspace{
+ assert.NoError(t, ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace2", &topodatapb.SrvKeyspace{
Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{
{
ServedType: topodatapb.TabletType_PRIMARY,
@@ -121,17 +124,13 @@ func TestFindAllTargets(t *testing.T) {
},
},
},
- }); err != nil {
- t.Fatalf("can't add srvKeyspace: %v", err)
- }
+ }))
- // Get it for all types.
- ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA})
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
+ // Get it for any keyspace, all types.
+ ks, err = FindAllTargets(ctx, rs, "cell1", nil, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA})
+ assert.NoError(t, err)
sort.Sort(TargetArray(ks))
- if !reflect.DeepEqual(ks, []*querypb.Target{
+ assert.EqualValues(t, []*querypb.Target{
{
Cell: "cell1",
Keyspace: "test_keyspace",
@@ -150,23 +149,40 @@ func TestFindAllTargets(t *testing.T) {
Shard: "test_shard2",
TabletType: topodatapb.TabletType_REPLICA,
},
- }) {
- t.Errorf("got wrong value: %v", ks)
- }
+ }, ks)
- // Only get the REPLICA targets.
- ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_REPLICA})
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if !reflect.DeepEqual(ks, []*querypb.Target{
+ // Only get 1 keyspace for all types.
+ ks, err = FindAllTargets(ctx, rs, "cell1", []string{"test_keyspace2"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA})
+ assert.NoError(t, err)
+ assert.EqualValues(t, []*querypb.Target{
+ {
+ Cell: "cell1",
+ Keyspace: "test_keyspace2",
+ Shard: "test_shard1",
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
{
Cell: "cell1",
Keyspace: "test_keyspace2",
Shard: "test_shard2",
TabletType: topodatapb.TabletType_REPLICA,
},
- }) {
- t.Errorf("got wrong value: %v", ks)
- }
+ }, ks)
+
+ // Only get the REPLICA targets for any keyspace.
+ ks, err = FindAllTargets(ctx, rs, "cell1", []string{}, []topodatapb.TabletType{topodatapb.TabletType_REPLICA})
+ assert.NoError(t, err)
+ assert.Equal(t, []*querypb.Target{
+ {
+ Cell: "cell1",
+ Keyspace: "test_keyspace2",
+ Shard: "test_shard2",
+ TabletType: topodatapb.TabletType_REPLICA,
+ },
+ }, ks)
+
+ // Get non-existent keyspace.
+ ks, err = FindAllTargets(ctx, rs, "cell1", []string{"doesnt-exist"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA})
+ assert.NoError(t, err)
+ assert.Len(t, ks, 0)
}
diff --git a/go/vt/srvtopo/query.go b/go/vt/srvtopo/query.go
index 098f5c77bc1..ec1ed50100a 100644
--- a/go/vt/srvtopo/query.go
+++ b/go/vt/srvtopo/query.go
@@ -86,7 +86,12 @@ func (q *resilientQuery) getCurrentValue(ctx context.Context, wkey fmt.Stringer,
// If it is not time to check again, then return either the cached
// value or the cached error but don't ask topo again.
- if !shouldRefresh {
+ // Here we have to be careful with the part where we haven't gotten even the first result.
+ // In that case, a refresh is already in progress, but the cache is empty! So, we can't use the cache.
+ // We have to wait for the query's results.
+ // We know the query has run at least once if the insertionTime is non-zero, or if we have an error.
+ queryRanAtLeastOnce := !entry.insertionTime.IsZero() || entry.lastError != nil
+ if !shouldRefresh && queryRanAtLeastOnce {
if cacheValid {
return entry.value, nil
}
diff --git a/go/vt/srvtopo/query_test.go b/go/vt/srvtopo/query_test.go
new file mode 100644
index 00000000000..2569a2ad420
--- /dev/null
+++ b/go/vt/srvtopo/query_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package srvtopo
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "vitess.io/vitess/go/stats"
+)
+
+// TestResilientQueryGetCurrentValueInitialization tests that the resilient query returns the correct results while it is
+// still being initialized, i.e. before the first refresh has populated the cache.
+func TestResilientQueryGetCurrentValueInitialization(t *testing.T) {
+ // Create a basic query, which doesn't do anything other than return the same cell it got as an input.
+ // The query however needs to simulate being slow, so we have a sleep in there.
+ query := func(ctx context.Context, entry *queryEntry) (any, error) {
+ time.Sleep(1 * time.Second)
+ cell := entry.key.(cellName)
+ return cell, nil
+ }
+ counts := stats.NewCountersWithSingleLabel("TestResilientQueryGetCurrentValue", "Test for resilient query", "type")
+
+ // Create the resilient query
+ rq := &resilientQuery{
+ query: query,
+ counts: counts,
+ cacheRefreshInterval: 5 * time.Second,
+ cacheTTL: 5 * time.Second,
+ entries: make(map[string]*queryEntry),
+ }
+
+ // Create a context and a cell.
+ ctx := context.Background()
+ cell := cellName("cell-1")
+
+ // Hammer the resilient query with multiple get requests just as it is created.
+ // We expect all of them to work.
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ // To test with both stale and not-stale, we use the modulo of our index.
+ stale := i%2 == 0
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ res, err := rq.getCurrentValue(ctx, cell, stale)
+ // Assert that we don't have any error and the value matches what we want.
+ assert.NoError(t, err)
+ assert.EqualValues(t, cell, res)
+ }()
+ }
+ // Wait for the wait group to be empty, otherwise the test is marked a success before any of the goroutines finish!
+ wg.Wait()
+}
diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go
index 3593bc0806d..91d7e45f92c 100644
--- a/go/vt/throttler/demo/throttler_demo.go
+++ b/go/vt/throttler/demo/throttler_demo.go
@@ -101,7 +101,7 @@ type replica struct {
// throttler is used to enforce the maximum rate at which replica applies
// transactions. It must not be confused with the client's throttler.
- throttler *throttler.Throttler
+ throttler throttler.Throttler
lastHealthUpdate time.Time
lagUpdateInterval time.Duration
@@ -224,7 +224,7 @@ type client struct {
primary *primary
healthCheck discovery.HealthCheck
- throttler *throttler.Throttler
+ throttler throttler.Throttler
stopChan chan struct{}
wg sync.WaitGroup
@@ -237,7 +237,7 @@ func newClient(primary *primary, replica *replica, ts *topo.Server) *client {
log.Fatal(err)
}
- healthCheck := discovery.NewHealthCheck(context.Background(), 5*time.Second, 1*time.Minute, ts, "cell1", "")
+ healthCheck := discovery.NewHealthCheck(context.Background(), 5*time.Second, 1*time.Minute, ts, "cell1", "", nil)
c := &client{
primary: primary,
healthCheck: healthCheck,
diff --git a/go/vt/throttler/manager.go b/go/vt/throttler/manager.go
index c2ee9f0a652..ee142190f75 100644
--- a/go/vt/throttler/manager.go
+++ b/go/vt/throttler/manager.go
@@ -64,16 +64,16 @@ type managerImpl struct {
// mu guards all fields in this group.
mu sync.Mutex
// throttlers tracks all running throttlers (by their name).
- throttlers map[string]*Throttler
+ throttlers map[string]Throttler
}
func newManager() *managerImpl {
return &managerImpl{
- throttlers: make(map[string]*Throttler),
+ throttlers: make(map[string]Throttler),
}
}
-func (m *managerImpl) registerThrottler(name string, throttler *Throttler) error {
+func (m *managerImpl) registerThrottler(name string, throttler Throttler) error {
m.mu.Lock()
defer m.mu.Unlock()
@@ -207,7 +207,7 @@ func (m *managerImpl) throttlerNamesLocked() []string {
// log returns the most recent changes of the MaxReplicationLag module.
// There will be one result for each processed replication lag record.
-func (m *managerImpl) log(throttlerName string) ([]result, error) {
+func (m *managerImpl) log(throttlerName string) ([]Result, error) {
m.mu.Lock()
defer m.mu.Unlock()
@@ -216,5 +216,5 @@ func (m *managerImpl) log(throttlerName string) ([]result, error) {
return nil, fmt.Errorf("throttler: %v does not exist", throttlerName)
}
- return t.log(), nil
+ return t.Log(), nil
}
diff --git a/go/vt/throttler/manager_test.go b/go/vt/throttler/manager_test.go
index 8c0e6ae4563..a483ce9dc8f 100644
--- a/go/vt/throttler/manager_test.go
+++ b/go/vt/throttler/manager_test.go
@@ -37,7 +37,7 @@ var (
type managerTestFixture struct {
m *managerImpl
- t1, t2 *Throttler
+ t1, t2 Throttler
}
func (f *managerTestFixture) setUp() error {
diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go
index f8037f7f975..f94f6fabf4a 100644
--- a/go/vt/throttler/max_replication_lag_module.go
+++ b/go/vt/throttler/max_replication_lag_module.go
@@ -301,12 +301,18 @@ func (m *MaxReplicationLagModule) recalculateRate(lagRecordNow replicationLagRec
if lagRecordNow.isZero() {
panic("rate recalculation was triggered with a zero replication lag record")
}
+
+ // Protect against nil stats
+ if lagRecordNow.Stats == nil {
+ return
+ }
+
now := lagRecordNow.time
lagNow := lagRecordNow.lag()
m.memory.ageBadRate(now)
- r := result{
+ r := Result{
Now: now,
RateChange: unchangedRate,
lastRateChange: m.lastRateChange,
@@ -375,7 +381,6 @@ logResult:
r.Reason += clearReason
}
- log.Infof("%v", r)
m.results.add(r)
}
@@ -440,7 +445,7 @@ func stateGreater(a, b state) bool {
// and we should not skip the current replica ("lagRecordNow").
// Even if it's the same replica we may skip it and return false because
// we want to wait longer for the propagation of the current rate change.
-func (m *MaxReplicationLagModule) isReplicaUnderTest(r *result, now time.Time, testedState state, lagRecordNow replicationLagRecord) bool {
+func (m *MaxReplicationLagModule) isReplicaUnderTest(r *Result, now time.Time, testedState state, lagRecordNow replicationLagRecord) bool {
if m.replicaUnderTest == nil {
return true
}
@@ -466,7 +471,7 @@ func (m *MaxReplicationLagModule) isReplicaUnderTest(r *result, now time.Time, t
return true
}
-func (m *MaxReplicationLagModule) increaseRate(r *result, now time.Time, lagRecordNow replicationLagRecord) {
+func (m *MaxReplicationLagModule) increaseRate(r *Result, now time.Time, lagRecordNow replicationLagRecord) {
m.markCurrentRateAsBadOrGood(r, now, stateIncreaseRate, unknown)
oldRate := m.rate.Get()
@@ -554,7 +559,7 @@ func (m *MaxReplicationLagModule) minTestDurationUntilNextIncrease(increase floa
return minDuration
}
-func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *result, now time.Time, lagRecordNow replicationLagRecord) {
+func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *Result, now time.Time, lagRecordNow replicationLagRecord) {
// Guess replication rate based on the difference in the replication lag of this
// particular replica.
lagRecordBefore := m.lagCache(lagRecordNow).atOrAfter(discovery.TabletToMapKey(lagRecordNow.Tablet), m.lastRateChange)
@@ -625,7 +630,7 @@ func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *result, now time.Time,
// guessReplicationRate guesses the actual replication rate based on the new bac
// Note that "lagDifference" can be positive (lag increased) or negative (lag
// decreased).
-func (m *MaxReplicationLagModule) guessReplicationRate(r *result, avgPrimaryRate float64, lagBefore, lagNow int64, lagDifference, d time.Duration) (int64, string) {
+func (m *MaxReplicationLagModule) guessReplicationRate(r *Result, avgPrimaryRate float64, lagBefore, lagNow int64, lagDifference, d time.Duration) (int64, string) {
// avgReplicationRate is the average rate (per second) at which the replica
// applied transactions from the replication stream. We infer the value
// from the relative change in the replication lag.
@@ -670,14 +675,14 @@ func (m *MaxReplicationLagModule) guessReplicationRate(r *result, avgPrimaryRate
return int64(newRate), reason
}
-func (m *MaxReplicationLagModule) emergency(r *result, now time.Time, lagRecordNow replicationLagRecord) {
+func (m *MaxReplicationLagModule) emergency(r *Result, now time.Time, lagRecordNow replicationLagRecord) {
m.markCurrentRateAsBadOrGood(r, now, stateEmergency, unknown)
decreaseReason := fmt.Sprintf("replication lag went beyond max: %d > %d", lagRecordNow.lag(), m.config.MaxReplicationLagSec)
m.decreaseRateByPercentage(r, now, lagRecordNow, stateEmergency, m.config.EmergencyDecrease, decreaseReason)
}
-func (m *MaxReplicationLagModule) decreaseRateByPercentage(r *result, now time.Time, lagRecordNow replicationLagRecord, newState state, decrease float64, decreaseReason string) {
+func (m *MaxReplicationLagModule) decreaseRateByPercentage(r *Result, now time.Time, lagRecordNow replicationLagRecord, newState state, decrease float64, decreaseReason string) {
oldRate := m.rate.Get()
rate := int64(float64(oldRate) - float64(oldRate)*decrease)
if rate == 0 {
@@ -689,7 +694,7 @@ func (m *MaxReplicationLagModule) decreaseRateByPercentage(r *result, now time.T
m.updateRate(r, newState, rate, reason, now, lagRecordNow, m.config.MinDurationBetweenDecreases())
}
-func (m *MaxReplicationLagModule) updateRate(r *result, newState state, rate int64, reason string, now time.Time, lagRecordNow replicationLagRecord, testDuration time.Duration) {
+func (m *MaxReplicationLagModule) updateRate(r *Result, newState state, rate int64, reason string, now time.Time, lagRecordNow replicationLagRecord, testDuration time.Duration) {
oldRate := m.rate.Get()
m.currentState = newState
@@ -717,7 +722,7 @@ func (m *MaxReplicationLagModule) updateRate(r *result, newState state, rate int
// markCurrentRateAsBadOrGood determines the actual rate between the last rate
// change and "now" and determines if that rate was bad or good.
-func (m *MaxReplicationLagModule) markCurrentRateAsBadOrGood(r *result, now time.Time, newState state, replicationLagChange replicationLagChange) {
+func (m *MaxReplicationLagModule) markCurrentRateAsBadOrGood(r *Result, now time.Time, newState state, replicationLagChange replicationLagChange) {
if m.lastRateChange.IsZero() {
// Module was just started. We don't have any data points yet.
r.GoodOrBad = ignoredRate
@@ -791,6 +796,6 @@ func (m *MaxReplicationLagModule) markCurrentRateAsBadOrGood(r *result, now time
}
}
-func (m *MaxReplicationLagModule) log() []result {
+func (m *MaxReplicationLagModule) log() []Result {
return m.results.latestValues()
}
diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go
index f0324df192c..6379b067412 100644
--- a/go/vt/throttler/max_replication_lag_module_test.go
+++ b/go/vt/throttler/max_replication_lag_module_test.go
@@ -22,6 +22,8 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/assert"
+
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/discovery"
@@ -83,6 +85,12 @@ func (tf *testFixture) process(lagRecord replicationLagRecord) {
tf.m.processRecord(lagRecord)
}
+// recalculateRate does the same thing as MaxReplicationLagModule.recalculateRate() does
+// for a new "lagRecord".
+func (tf *testFixture) recalculateRate(lagRecord replicationLagRecord) {
+ tf.m.recalculateRate(lagRecord)
+}
+
func (tf *testFixture) checkState(state state, rate int64, lastRateChange time.Time) error {
if got, want := tf.m.currentState, state; got != want {
return fmt.Errorf("module in wrong state. got = %v, want = %v", got, want)
@@ -96,6 +104,47 @@ func (tf *testFixture) checkState(state state, rate int64, lastRateChange time.T
return nil
}
+func TestNewMaxReplicationLagModule_recalculateRate(t *testing.T) {
+ testCases := []struct {
+ name string
+ lagRecord replicationLagRecord
+ expectPanic bool
+ }{
+ {
+ name: "Zero lag",
+ lagRecord: replicationLagRecord{
+ time: time.Time{},
+ TabletHealth: discovery.TabletHealth{Stats: nil},
+ },
+ expectPanic: true,
+ },
+ {
+ name: "nil lag record stats",
+ lagRecord: replicationLagRecord{
+ time: time.Now(),
+ TabletHealth: discovery.TabletHealth{Stats: nil},
+ },
+ expectPanic: false,
+ },
+ }
+
+ for _, aTestCase := range testCases {
+ theCase := aTestCase
+
+ t.Run(theCase.name, func(t *testing.T) {
+ t.Parallel()
+
+ fixture, err := newTestFixtureWithMaxReplicationLag(5)
+ assert.NoError(t, err)
+
+ if theCase.expectPanic {
+ assert.Panics(t, func() { fixture.recalculateRate(theCase.lagRecord) })
+ }
+ },
+ )
+ }
+}
+
func TestMaxReplicationLagModule_RateNotZeroWhenDisabled(t *testing.T) {
tf, err := newTestFixtureWithMaxReplicationLag(ReplicationLagModuleDisabled)
if err != nil {
diff --git a/go/vt/throttler/replication_lag_cache.go b/go/vt/throttler/replication_lag_cache.go
index c9c2e94f113..ab26c0bc6b8 100644
--- a/go/vt/throttler/replication_lag_cache.go
+++ b/go/vt/throttler/replication_lag_cache.go
@@ -18,6 +18,7 @@ package throttler
import (
"sort"
+ "sync"
"time"
"vitess.io/vitess/go/vt/discovery"
@@ -30,6 +31,8 @@ type replicationLagCache struct {
// The map key is replicationLagRecord.LegacyTabletStats.Key.
entries map[string]*replicationLagHistory
+ mu sync.Mutex
+
// slowReplicas is a set of slow replicas.
// The map key is replicationLagRecord.LegacyTabletStats.Key.
// This map will always be recomputed by sortByLag() and must not be modified
@@ -60,6 +63,9 @@ func newReplicationLagCache(historyCapacityPerReplica int) *replicationLagCache
// add inserts or updates "r" in the cache for the replica with the key "r.Key".
func (c *replicationLagCache) add(r replicationLagRecord) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
if !r.Serving {
// Tablet is down. Do no longer track it.
delete(c.entries, discovery.TabletToMapKey(r.Tablet))
@@ -76,9 +82,35 @@ func (c *replicationLagCache) add(r replicationLagRecord) {
entry.add(r)
}
+// maxLag returns the maximum replication lag for the entries in cache.
+func (c *replicationLagCache) maxLag() (maxLag uint32) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for key := range c.entries {
+ if c.isIgnored(key) {
+ continue
+ }
+
+ entry, ok := c.entries[key]
+ if !ok {
+ continue
+ }
+
+ latest := entry.latest()
+ if lag := latest.Stats.ReplicationLagSeconds; lag > maxLag {
+ maxLag = lag
+ }
+ }
+
+ return maxLag
+}
+
// latest returns the current lag record for the given LegacyTabletStats.Key string.
// A zero record is returned if there is no latest entry.
func (c *replicationLagCache) latest(key string) replicationLagRecord {
+ c.mu.Lock()
+ defer c.mu.Unlock()
entry, ok := c.entries[key]
if !ok {
return replicationLagRecord{}
@@ -90,6 +122,8 @@ func (c *replicationLagCache) latest(key string) replicationLagRecord {
// or just after it.
// If there is no such record, a zero record is returned.
func (c *replicationLagCache) atOrAfter(key string, at time.Time) replicationLagRecord {
+ c.mu.Lock()
+ defer c.mu.Unlock()
entry, ok := c.entries[key]
if !ok {
return replicationLagRecord{}
@@ -100,6 +134,9 @@ func (c *replicationLagCache) atOrAfter(key string, at time.Time) replicationLag
// sortByLag sorts all replicas by their latest replication lag value and
// tablet uid and updates the c.slowReplicas set.
func (c *replicationLagCache) sortByLag(ignoreNSlowestReplicas int, minimumReplicationLag int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
// Reset the current list of ignored replicas.
c.slowReplicas = make(map[string]bool)
@@ -142,6 +179,9 @@ func (a byLagAndTabletUID) Less(i, j int) bool {
// this slow replica.
// "key" refers to ReplicationLagRecord.LegacyTabletStats.Key.
func (c *replicationLagCache) ignoreSlowReplica(key string) bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
if len(c.slowReplicas) == 0 {
// No slow replicas at all.
return false
diff --git a/go/vt/throttler/replication_lag_cache_test.go b/go/vt/throttler/replication_lag_cache_test.go
index 312f97e1999..9b34210d096 100644
--- a/go/vt/throttler/replication_lag_cache_test.go
+++ b/go/vt/throttler/replication_lag_cache_test.go
@@ -20,6 +20,8 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/vt/discovery"
)
@@ -91,3 +93,10 @@ func TestReplicationLagCache_SortByLag(t *testing.T) {
t.Fatal("r1 should be tracked as a slow replica")
}
}
+
+func TestReplicationLagCache_MaxLag(t *testing.T) {
+ c := newReplicationLagCache(2)
+ c.add(lagRecord(sinceZero(1*time.Second), r1, 30))
+ c.add(lagRecord(sinceZero(1*time.Second), r2, 1))
+ require.Equal(t, uint32(30), c.maxLag())
+}
diff --git a/go/vt/throttler/result.go b/go/vt/throttler/result.go
index 179711116a3..8af02e58a3b 100644
--- a/go/vt/throttler/result.go
+++ b/go/vt/throttler/result.go
@@ -50,10 +50,10 @@ state (old/tested/new): {{.OldState}}/{{.TestedState}}/{{.NewState}}
lag before: {{.LagBefore}} ({{.AgeOfBeforeLag}} ago) rates (primary/replica): {{.PrimaryRate}}/{{.GuessedReplicationRate}} backlog (old/new): {{.GuessedReplicationBacklogOld}}/{{.GuessedReplicationBacklogNew}}
reason: {{.Reason}}`))
-// result is generated by the MaxReplicationLag module for each processed
+// Result is generated by the MaxReplicationLag module for each processed
// "replicationLagRecord".
// It captures the details and the decision of the processing.
-type result struct {
+type Result struct {
Now time.Time
RateChange rateChange
lastRateChange time.Time
@@ -80,7 +80,7 @@ type result struct {
GuessedReplicationBacklogNew int
}
-func (r result) String() string {
+func (r Result) String() string {
var b bytes.Buffer
if err := resultStringTemplate.Execute(&b, r); err != nil {
panic(fmt.Sprintf("failed to Execute() template: %v", err))
@@ -88,25 +88,25 @@ func (r result) String() string {
return b.String()
}
-func (r result) Alias() string {
+func (r Result) Alias() string {
return topoproto.TabletAliasString(r.LagRecordNow.Tablet.Alias)
}
-func (r result) TimeSinceLastRateChange() string {
+func (r Result) TimeSinceLastRateChange() string {
if r.lastRateChange.IsZero() {
return "n/a"
}
return fmt.Sprintf("%.1fs", r.Now.Sub(r.lastRateChange).Seconds())
}
-func (r result) LagBefore() string {
+func (r Result) LagBefore() string {
if r.LagRecordBefore.isZero() {
return "n/a"
}
return fmt.Sprintf("%ds", r.LagRecordBefore.Stats.ReplicationLagSeconds)
}
-func (r result) AgeOfBeforeLag() string {
+func (r Result) AgeOfBeforeLag() string {
if r.LagRecordBefore.isZero() {
return "n/a"
}
@@ -123,18 +123,18 @@ type resultRing struct {
// started reusing entries.
wrapped bool
// values is the underlying ring buffer.
- values []result
+ values []Result
}
// newResultRing creates a new resultRing.
func newResultRing(capacity int) *resultRing {
return &resultRing{
- values: make([]result, capacity),
+ values: make([]Result, capacity),
}
}
// add inserts a new result into the ring buffer.
-func (rr *resultRing) add(r result) {
+func (rr *resultRing) add(r Result) {
rr.mu.Lock()
defer rr.mu.Unlock()
@@ -148,7 +148,7 @@ func (rr *resultRing) add(r result) {
// latestValues returns all values of the buffer. Entries are sorted in reverse
// chronological order i.e. newer items come first.
-func (rr *resultRing) latestValues() []result {
+func (rr *resultRing) latestValues() []Result {
rr.mu.Lock()
defer rr.mu.Unlock()
@@ -162,7 +162,7 @@ func (rr *resultRing) latestValues() []result {
count = rr.position
}
- results := make([]result, count)
+ results := make([]Result, count)
for i := 0; i < count; i++ {
pos := start - i
if pos < 0 {
diff --git a/go/vt/throttler/result_test.go b/go/vt/throttler/result_test.go
index 9efc7df9412..9eadab503e8 100644
--- a/go/vt/throttler/result_test.go
+++ b/go/vt/throttler/result_test.go
@@ -23,7 +23,7 @@ import (
)
var (
- resultIncreased = result{
+ resultIncreased = Result{
Now: sinceZero(1234 * time.Millisecond),
RateChange: increasedRate,
lastRateChange: sinceZero(1 * time.Millisecond),
@@ -45,7 +45,7 @@ var (
GuessedReplicationBacklogOld: 0,
GuessedReplicationBacklogNew: 0,
}
- resultDecreased = result{
+ resultDecreased = Result{
Now: sinceZero(5000 * time.Millisecond),
RateChange: decreasedRate,
lastRateChange: sinceZero(1234 * time.Millisecond),
@@ -67,7 +67,7 @@ var (
GuessedReplicationBacklogOld: 10,
GuessedReplicationBacklogNew: 20,
}
- resultEmergency = result{
+ resultEmergency = Result{
Now: sinceZero(10123 * time.Millisecond),
RateChange: decreasedRate,
lastRateChange: sinceZero(5000 * time.Millisecond),
@@ -93,7 +93,7 @@ var (
func TestResultString(t *testing.T) {
testcases := []struct {
- r result
+ r Result
want string
}{
{
@@ -135,27 +135,27 @@ reason: emergency state decreased the rate`,
func TestResultRing(t *testing.T) {
// Test data.
- r1 := result{Reason: "r1"}
- r2 := result{Reason: "r2"}
- r3 := result{Reason: "r3"}
+ r1 := Result{Reason: "r1"}
+ r2 := Result{Reason: "r2"}
+ r3 := Result{Reason: "r3"}
rr := newResultRing(2)
// Use the ring partially.
rr.add(r1)
- if got, want := rr.latestValues(), []result{r1}; !reflect.DeepEqual(got, want) {
+ if got, want := rr.latestValues(), []Result{r1}; !reflect.DeepEqual(got, want) {
t.Fatalf("items not correctly added to resultRing. got = %v, want = %v", got, want)
}
// Use it fully.
rr.add(r2)
- if got, want := rr.latestValues(), []result{r2, r1}; !reflect.DeepEqual(got, want) {
+ if got, want := rr.latestValues(), []Result{r2, r1}; !reflect.DeepEqual(got, want) {
t.Fatalf("items not correctly added to resultRing. got = %v, want = %v", got, want)
}
// Let it wrap.
rr.add(r3)
- if got, want := rr.latestValues(), []result{r3, r2}; !reflect.DeepEqual(got, want) {
+ if got, want := rr.latestValues(), []Result{r3, r2}; !reflect.DeepEqual(got, want) {
t.Fatalf("resultRing did not wrap correctly. got = %v, want = %v", got, want)
}
}
diff --git a/go/vt/throttler/throttler.go b/go/vt/throttler/throttler.go
index 03a20013396..cd237548b3b 100644
--- a/go/vt/throttler/throttler.go
+++ b/go/vt/throttler/throttler.go
@@ -35,8 +35,10 @@ import (
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/proto/topodata"
throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
const (
@@ -65,7 +67,22 @@ const (
InvalidMaxReplicationLag = -1
)
-// Throttler provides a client-side, thread-aware throttler.
+// Throttler defines the throttler interface.
+type Throttler interface {
+ Throttle(threadID int) time.Duration
+ ThreadFinished(threadID int)
+ Close()
+ MaxRate() int64
+ SetMaxRate(rate int64)
+ RecordReplicationLag(time time.Time, th *discovery.TabletHealth)
+ GetConfiguration() *throttlerdatapb.Configuration
+ UpdateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error
+ ResetConfiguration()
+ MaxLag(tabletType topodatapb.TabletType) uint32
+ Log() []Result
+}
+
+// ThrottlerImpl implements a client-side, thread-aware throttler.
// See the package doc for more information.
//
// Calls of Throttle() and ThreadFinished() take threadID as parameter which is
@@ -73,7 +90,7 @@ const (
// NOTE: Trottle() and ThreadFinished() assume that *per thread* calls to them
//
// are serialized and must not happen concurrently.
-type Throttler struct {
+type ThrottlerImpl struct {
// name describes the Throttler instance and is used e.g. in the webinterface.
name string
// unit describes the entity the throttler is limiting e.g. "queries" or
@@ -126,23 +143,35 @@ type Throttler struct {
// unit refers to the type of entity you want to throttle e.g. "queries" or
// "transactions".
// name describes the Throttler instance and will be used by the webinterface.
-func NewThrottler(name, unit string, threadCount int, maxRate, maxReplicationLag int64) (*Throttler, error) {
+func NewThrottler(name, unit string, threadCount int, maxRate, maxReplicationLag int64) (Throttler, error) {
return newThrottler(GlobalManager, name, unit, threadCount, maxRate, maxReplicationLag, time.Now)
}
-func newThrottler(manager *managerImpl, name, unit string, threadCount int, maxRate, maxReplicationLag int64, nowFunc func() time.Time) (*Throttler, error) {
- // Verify input parameters.
- if maxRate < 0 {
- return nil, fmt.Errorf("maxRate must be >= 0: %v", maxRate)
+func NewThrottlerFromConfig(name, unit string, threadCount int, maxRateModuleMaxRate int64, maxReplicationLagModuleConfig MaxReplicationLagModuleConfig, nowFunc func() time.Time) (Throttler, error) {
+ return newThrottlerFromConfig(GlobalManager, name, unit, threadCount, maxRateModuleMaxRate, maxReplicationLagModuleConfig, nowFunc)
+}
+
+func newThrottler(manager *managerImpl, name, unit string, threadCount int, maxRate, maxReplicationLag int64, nowFunc func() time.Time) (Throttler, error) {
+ config := NewMaxReplicationLagModuleConfig(maxReplicationLag)
+ config.MaxReplicationLagSec = maxReplicationLag
+
+ return newThrottlerFromConfig(manager, name, unit, threadCount, maxRate, config, nowFunc)
+
+}
+
+func newThrottlerFromConfig(manager *managerImpl, name, unit string, threadCount int, maxRateModuleMaxRate int64, maxReplicationLagModuleConfig MaxReplicationLagModuleConfig, nowFunc func() time.Time) (Throttler, error) {
+ err := maxReplicationLagModuleConfig.Verify()
+ if err != nil {
+ return nil, fmt.Errorf("invalid max replication lag config: %w", err)
}
- if maxReplicationLag < 0 {
- return nil, fmt.Errorf("maxReplicationLag must be >= 0: %v", maxReplicationLag)
+ if maxRateModuleMaxRate < 0 {
+ return nil, fmt.Errorf("maxRate must be >= 0: %v", maxRateModuleMaxRate)
}
// Enable the configured modules.
- maxRateModule := NewMaxRateModule(maxRate)
+ maxRateModule := NewMaxRateModule(maxRateModuleMaxRate)
actualRateHistory := newAggregatedIntervalHistory(1024, 1*time.Second, threadCount)
- maxReplicationLagModule, err := NewMaxReplicationLagModule(NewMaxReplicationLagModuleConfig(maxReplicationLag), actualRateHistory, nowFunc)
+ maxReplicationLagModule, err := NewMaxReplicationLagModule(maxReplicationLagModuleConfig, actualRateHistory, nowFunc)
if err != nil {
return nil, err
}
@@ -163,7 +192,7 @@ func newThrottler(manager *managerImpl, name, unit string, threadCount int, maxR
threadThrottlers[i] = newThreadThrottler(i, actualRateHistory)
runningThreads[i] = true
}
- t := &Throttler{
+ t := &ThrottlerImpl{
name: name,
unit: unit,
manager: manager,
@@ -202,7 +231,7 @@ func newThrottler(manager *managerImpl, name, unit string, threadCount int, maxR
// the backoff duration elapsed.
// The maximum value for the returned backoff is 1 second since the throttler
// internally operates on a per-second basis.
-func (t *Throttler) Throttle(threadID int) time.Duration {
+func (t *ThrottlerImpl) Throttle(threadID int) time.Duration {
if t.closed {
panic(fmt.Sprintf("BUG: thread with ID: %v must not access closed Throttler", threadID))
}
@@ -212,10 +241,20 @@ func (t *Throttler) Throttle(threadID int) time.Duration {
return t.threadThrottlers[threadID].throttle(t.nowFunc())
}
+// MaxLag returns the max of all the last replication lag values seen across all tablets of
+// the provided type, excluding ignored tablets.
+func (t *ThrottlerImpl) MaxLag(tabletType topodata.TabletType) uint32 {
+ cache := t.maxReplicationLagModule.lagCacheByType(tabletType)
+ if cache == nil {
+ return 0
+ }
+ return cache.maxLag()
+}
+
// ThreadFinished marks threadID as finished and redistributes the thread's
// rate allotment across the other threads.
// After ThreadFinished() is called, Throttle() must not be called anymore.
-func (t *Throttler) ThreadFinished(threadID int) {
+func (t *ThrottlerImpl) ThreadFinished(threadID int) {
if t.threadFinished[threadID] {
panic(fmt.Sprintf("BUG: thread with ID: %v already finished", threadID))
}
@@ -230,7 +269,7 @@ func (t *Throttler) ThreadFinished(threadID int) {
// Close stops all modules and frees all resources.
// When Close() returned, the Throttler object must not be used anymore.
-func (t *Throttler) Close() {
+func (t *ThrottlerImpl) Close() {
for _, m := range t.modules {
m.Stop()
}
@@ -243,7 +282,7 @@ func (t *Throttler) Close() {
// threadThrottlers accordingly.
// The rate changes when the number of thread changes or a module updated its
// max rate.
-func (t *Throttler) updateMaxRate() {
+func (t *ThrottlerImpl) updateMaxRate() {
// Set it to infinite initially.
maxRate := int64(math.MaxInt64)
@@ -284,39 +323,39 @@ func (t *Throttler) updateMaxRate() {
}
// MaxRate returns the current rate of the MaxRateModule.
-func (t *Throttler) MaxRate() int64 {
+func (t *ThrottlerImpl) MaxRate() int64 {
return t.maxRateModule.MaxRate()
}
// SetMaxRate updates the rate of the MaxRateModule.
-func (t *Throttler) SetMaxRate(rate int64) {
+func (t *ThrottlerImpl) SetMaxRate(rate int64) {
t.maxRateModule.SetMaxRate(rate)
}
// RecordReplicationLag must be called by users to report the "ts" tablet health
// data observed at "time".
// Note: After Close() is called, this method must not be called anymore.
-func (t *Throttler) RecordReplicationLag(time time.Time, th *discovery.TabletHealth) {
+func (t *ThrottlerImpl) RecordReplicationLag(time time.Time, th *discovery.TabletHealth) {
t.maxReplicationLagModule.RecordReplicationLag(time, th)
}
// GetConfiguration returns the configuration of the MaxReplicationLag module.
-func (t *Throttler) GetConfiguration() *throttlerdatapb.Configuration {
+func (t *ThrottlerImpl) GetConfiguration() *throttlerdatapb.Configuration {
return t.maxReplicationLagModule.getConfiguration()
}
// UpdateConfiguration updates the configuration of the MaxReplicationLag module.
-func (t *Throttler) UpdateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error {
+func (t *ThrottlerImpl) UpdateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error {
return t.maxReplicationLagModule.updateConfiguration(configuration, copyZeroValues)
}
// ResetConfiguration resets the configuration of the MaxReplicationLag module
// to its initial settings.
-func (t *Throttler) ResetConfiguration() {
+func (t *ThrottlerImpl) ResetConfiguration() {
t.maxReplicationLagModule.resetConfiguration()
}
-// log returns the most recent changes of the MaxReplicationLag module.
-func (t *Throttler) log() []result {
+// Log returns the most recent changes of the MaxReplicationLag module.
+func (t *ThrottlerImpl) Log() []Result {
return t.maxReplicationLagModule.log()
}
diff --git a/go/vt/throttler/throttler_test.go b/go/vt/throttler/throttler_test.go
index 0bb0ed0387a..e7e7c13c466 100644
--- a/go/vt/throttler/throttler_test.go
+++ b/go/vt/throttler/throttler_test.go
@@ -17,10 +17,18 @@ limitations under the License.
package throttler
import (
+ "context"
"runtime"
"strings"
+ "sync"
"testing"
"time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/discovery"
+ "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/proto/topodata"
)
// The main purpose of the benchmarks below is to demonstrate the functionality
@@ -162,7 +170,7 @@ func sinceZero(sinceZero time.Duration) time.Time {
// threadThrottler.newThreadThrottler() for more details.
// newThrottlerWithClock should only be used for testing.
-func newThrottlerWithClock(name, unit string, threadCount int, maxRate int64, maxReplicationLag int64, nowFunc func() time.Time) (*Throttler, error) {
+func newThrottlerWithClock(name, unit string, threadCount int, maxRate int64, maxReplicationLag int64, nowFunc func() time.Time) (Throttler, error) {
return newThrottler(GlobalManager, name, unit, threadCount, maxRate, maxReplicationLag, nowFunc)
}
@@ -274,14 +282,16 @@ func TestThreadFinished(t *testing.T) {
// Max rate update to threadThrottlers happens asynchronously. Wait for it.
timer := time.NewTimer(2 * time.Second)
+ throttlerImpl, ok := throttler.(*ThrottlerImpl)
+ require.True(t, ok)
for {
- if throttler.threadThrottlers[0].getMaxRate() == 2 {
+ if throttlerImpl.threadThrottlers[0].getMaxRate() == 2 {
timer.Stop()
break
}
select {
case <-timer.C:
- t.Fatalf("max rate was not propapgated to threadThrottler[0] in time: %v", throttler.threadThrottlers[0].getMaxRate())
+ t.Fatalf("max rate was not propagated to threadThrottler[0] in time: %v", throttlerImpl.threadThrottlers[0].getMaxRate())
default:
// Timer not up yet. Try again.
}
@@ -389,7 +399,9 @@ func TestUpdateMaxRate_AllThreadsFinished(t *testing.T) {
throttler.ThreadFinished(1)
// Make sure that there's no division by zero error (threadsRunning == 0).
- throttler.updateMaxRate()
+ throttlerImpl, ok := throttler.(*ThrottlerImpl)
+ require.True(t, ok)
+ throttlerImpl.updateMaxRate()
// We don't care about the Throttler state at this point.
}
@@ -426,3 +438,78 @@ func TestThreadFinished_SecondCallPanics(t *testing.T) {
}()
throttler.ThreadFinished(0)
}
+
+func TestThrottlerMaxLag(t *testing.T) {
+ fc := &fakeClock{}
+ throttler, err := newThrottlerWithClock(t.Name(), "queries", 1, 1, 10, fc.now)
+ require.NoError(t, err)
+ defer throttler.Close()
+
+ require.NotNil(t, throttler)
+ throttlerImpl, ok := throttler.(*ThrottlerImpl)
+ require.True(t, ok)
+ require.NotNil(t, throttlerImpl.maxReplicationLagModule)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg sync.WaitGroup
+
+ // run .add() and .MaxLag() concurrently to detect races
+ for _, tabletType := range []topodata.TabletType{
+ topodata.TabletType_REPLICA,
+ topodata.TabletType_RDONLY,
+ } {
+ wg.Add(1)
+ go func(wg *sync.WaitGroup, ctx context.Context, t *ThrottlerImpl, tabletType topodata.TabletType) {
+ defer wg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ throttler.MaxLag(tabletType)
+ }
+ }
+ }(&wg, ctx, throttlerImpl, tabletType)
+
+ wg.Add(1)
+ go func(wg *sync.WaitGroup, ctx context.Context, throttler *ThrottlerImpl, tabletType topodata.TabletType) {
+ defer wg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ cache := throttler.maxReplicationLagModule.lagCacheByType(tabletType)
+ require.NotNil(t, cache)
+ cache.add(replicationLagRecord{
+ time: time.Now(),
+ TabletHealth: discovery.TabletHealth{
+ Serving: true,
+ Stats: &query.RealtimeStats{
+ ReplicationLagSeconds: 5,
+ },
+ Tablet: &topodata.Tablet{
+ Hostname: t.Name(),
+ Type: tabletType,
+ PortMap: map[string]int32{
+ "test": 15999,
+ },
+ },
+ },
+ })
+ }
+ }
+ }(&wg, ctx, throttlerImpl, tabletType)
+ }
+ time.Sleep(time.Second)
+ cancel()
+ wg.Wait()
+
+ // check .MaxLag()
+ for _, tabletType := range []topodata.TabletType{
+ topodata.TabletType_REPLICA,
+ topodata.TabletType_RDONLY,
+ } {
+ require.Equal(t, uint32(5), throttler.MaxLag(tabletType))
+ }
+}
diff --git a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go b/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go
index 38fd9d76286..99b5a40e0ca 100644
--- a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go
+++ b/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go
@@ -73,7 +73,7 @@ func TestSuitePanics(t *testing.T, c throttlerclient.Client) {
var throttlerNames = []string{"t1", "t2"}
type testFixture struct {
- throttlers []*throttler.Throttler
+ throttlers []throttler.Throttler
}
func (tf *testFixture) setUp() error {
diff --git a/go/vt/throttler/throttlerlogz.go b/go/vt/throttler/throttlerlogz.go
index 6952b34feec..b5ce5376108 100644
--- a/go/vt/throttler/throttlerlogz.go
+++ b/go/vt/throttler/throttlerlogz.go
@@ -152,7 +152,7 @@ func showThrottlerLog(w http.ResponseWriter, m *managerImpl, name string) {
colorLevel = "high"
}
data := struct {
- result
+ Result
ColorLevel string
}{r, colorLevel}
diff --git a/go/vt/throttler/throttlerlogz_test.go b/go/vt/throttler/throttlerlogz_test.go
index 82ebb77e7a1..d5d1ff62327 100644
--- a/go/vt/throttler/throttlerlogz_test.go
+++ b/go/vt/throttler/throttlerlogz_test.go
@@ -21,6 +21,8 @@ import (
"net/http/httptest"
"strings"
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestThrottlerlogzHandler_MissingSlash(t *testing.T) {
@@ -55,7 +57,7 @@ func TestThrottlerlogzHandler(t *testing.T) {
testcases := []struct {
desc string
- r result
+ r Result
want string
}{
{
@@ -148,7 +150,9 @@ func TestThrottlerlogzHandler(t *testing.T) {
request, _ := http.NewRequest("GET", "/throttlerlogz/t1", nil)
response := httptest.NewRecorder()
- f.t1.maxReplicationLagModule.results.add(tc.r)
+ throttler, ok := f.t1.(*ThrottlerImpl)
+ require.True(t, ok)
+ throttler.maxReplicationLagModule.results.add(tc.r)
throttlerlogzHandler(response, request, f.m)
got := response.Body.String()
diff --git a/go/vt/tlstest/tlstest.go b/go/vt/tlstest/tlstest.go
index 500a3974c48..0529ea4ef09 100644
--- a/go/vt/tlstest/tlstest.go
+++ b/go/vt/tlstest/tlstest.go
@@ -337,12 +337,18 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) {
if err != nil {
log.Fatal(err)
}
- crlList, err := x509.ParseCRL(data)
+
+ block, _ := pem.Decode(data)
+ if block == nil || block.Type != "X509 CRL" {
+ log.Fatal("failed to parse CRL PEM")
+ }
+
+ crlList, err := x509.ParseRevocationList(block.Bytes)
if err != nil {
log.Fatal(err)
}
- revoked := crlList.TBSCertList.RevokedCertificates
+ revoked := crlList.RevokedCertificates
revoked = append(revoked, pkix.RevokedCertificate{
SerialNumber: certificate.SerialNumber,
RevocationTime: time.Now(),
@@ -357,9 +363,10 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) {
log.Fatal(err)
}
+ var crlNumber big.Int
newCrl, err := x509.CreateRevocationList(rand.Reader, &x509.RevocationList{
RevokedCertificates: revoked,
- Number: big.NewInt(int64(crlList.TBSCertList.Version) + 1),
+ Number: crlNumber.Add(crlList.Number, big.NewInt(1)),
}, caCert, caKey.(crypto.Signer))
if err != nil {
log.Fatal(err)
diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go
index c12e65b8d88..5c79e45b906 100644
--- a/go/vt/tlstest/tlstest_test.go
+++ b/go/vt/tlstest/tlstest_test.go
@@ -162,7 +162,7 @@ func testClientServer(t *testing.T, combineCerts bool) {
// With TLS 1.3, the Dial will succeed and the first Read will fail.
clientConn, err := tls.DialWithDialer(dialer, "tcp", addr, badClientConfig)
if err != nil {
- if !strings.Contains(err.Error(), "bad certificate") {
+ if !strings.Contains(err.Error(), "certificate required") {
t.Errorf("Wrong error returned: %v", err)
}
return
@@ -177,7 +177,8 @@ func testClientServer(t *testing.T, combineCerts bool) {
if err == nil {
t.Fatalf("Dial or first Read was expected to fail")
}
- if !strings.Contains(err.Error(), "bad certificate") {
+
+ if !strings.Contains(err.Error(), "certificate required") {
t.Errorf("Wrong error returned: %v", err)
}
}
diff --git a/go/vt/topo/consultopo/error.go b/go/vt/topo/consultopo/error.go
index 42f474e065b..62167a4d295 100644
--- a/go/vt/topo/consultopo/error.go
+++ b/go/vt/topo/consultopo/error.go
@@ -40,15 +40,16 @@ var (
// are either application-level errors, or context errors.
func convertError(err error, nodePath string) error {
// Unwrap errors from the Go HTTP client.
- if urlErr, ok := err.(*url.Error); ok {
+ var urlErr *url.Error
+ if errors.As(err, &urlErr) {
err = urlErr.Err
}
// Convert specific sentinel values.
- switch err {
- case context.Canceled:
+ switch {
+ case errors.Is(err, context.Canceled):
return topo.NewError(topo.Interrupted, nodePath)
- case context.DeadlineExceeded:
+ case errors.Is(err, context.DeadlineExceeded):
return topo.NewError(topo.Timeout, nodePath)
}
diff --git a/go/vt/topo/consultopo/file.go b/go/vt/topo/consultopo/file.go
index 2c08a675a79..e9d8dfbccd1 100644
--- a/go/vt/topo/consultopo/file.go
+++ b/go/vt/topo/consultopo/file.go
@@ -87,7 +87,9 @@ func (s *Server) Update(ctx context.Context, filePath string, contents []byte, v
func (s *Server) Get(ctx context.Context, filePath string) ([]byte, topo.Version, error) {
nodePath := path.Join(s.root, filePath)
- pair, _, err := s.kv.Get(nodePath, nil)
+ pair, _, err := s.kv.Get(nodePath, &api.QueryOptions{
+ AllowStale: consulAllowStaleReads,
+ })
if err != nil {
return nil, nil, err
}
@@ -102,7 +104,9 @@ func (s *Server) Get(ctx context.Context, filePath string) ([]byte, topo.Version
func (s *Server) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo, error) {
nodePathPrefix := path.Join(s.root, filePathPrefix)
- pairs, _, err := s.kv.List(nodePathPrefix, nil)
+ pairs, _, err := s.kv.List(nodePathPrefix, &api.QueryOptions{
+ AllowStale: consulAllowStaleReads,
+ })
if err != nil {
return []topo.KVInfo{}, err
}
diff --git a/go/vt/topo/consultopo/server.go b/go/vt/topo/consultopo/server.go
index 3e9192b0e46..c6d865fb27a 100644
--- a/go/vt/topo/consultopo/server.go
+++ b/go/vt/topo/consultopo/server.go
@@ -28,6 +28,7 @@ import (
"time"
"github.com/hashicorp/consul/api"
+ "github.com/hashicorp/go-cleanhttp"
"github.com/spf13/pflag"
"vitess.io/vitess/go/vt/log"
@@ -41,7 +42,11 @@ var (
// serfHealth is the default check from consul
consulLockSessionChecks = "serfHealth"
consulLockSessionTTL string
- consulLockDelay = 15 * time.Second
+ consulLockDelay = 15 * time.Second
+ consulMaxConnsPerHost int = 250 // do not use client default of 0/unlimited
+ consulMaxIdleConns int
+ consulIdleConnTimeout time.Duration
+ consulAllowStaleReads bool
)
func init() {
@@ -49,10 +54,19 @@ func init() {
}
func registerServerFlags(fs *pflag.FlagSet) {
+ // cleanhttp.DefaultPooledTransport() is used by the consul api client
+ // as an *http.Transport. We call it here just to get the default
+ // values the consul api client will inherit from it later.
+ defaultConsulPooledTransport := cleanhttp.DefaultPooledTransport()
+
fs.StringVar(&consulAuthClientStaticFile, "consul_auth_static_file", consulAuthClientStaticFile, "JSON File to read the topos/tokens from.")
fs.StringVar(&consulLockSessionChecks, "topo_consul_lock_session_checks", consulLockSessionChecks, "List of checks for consul session.")
fs.StringVar(&consulLockSessionTTL, "topo_consul_lock_session_ttl", consulLockSessionTTL, "TTL for consul session.")
fs.DurationVar(&consulLockDelay, "topo_consul_lock_delay", consulLockDelay, "LockDelay for consul session.")
+ fs.IntVar(&consulMaxConnsPerHost, "topo_consul_max_conns_per_host", consulMaxConnsPerHost, "Maximum number of consul connections per host.")
+ fs.IntVar(&consulMaxIdleConns, "topo_consul_max_idle_conns", defaultConsulPooledTransport.MaxIdleConns, "Maximum number of idle consul connections.")
+ fs.DurationVar(&consulIdleConnTimeout, "topo_consul_idle_conn_timeout", defaultConsulPooledTransport.IdleConnTimeout, "Maximum amount of time to pool idle connections.")
+ fs.BoolVar(&consulAllowStaleReads, "topo_consul_allow_stale_reads", consulAllowStaleReads, "Allow stale reads from consul servers")
}
// ClientAuthCred credential to use for consul clusters
@@ -133,6 +147,9 @@ func NewServer(cell, serverAddr, root string) (*Server, error) {
}
cfg := api.DefaultConfig()
cfg.Address = serverAddr
+ cfg.Transport.MaxConnsPerHost = consulMaxConnsPerHost
+ cfg.Transport.MaxIdleConns = consulMaxIdleConns
+ cfg.Transport.IdleConnTimeout = consulIdleConnTimeout
if creds != nil {
if creds[cell] != nil {
cfg.Token = creds[cell].ACLToken
diff --git a/go/vt/topo/decode.go b/go/vt/topo/decode.go
new file mode 100644
index 00000000000..1265b0e4a80
--- /dev/null
+++ b/go/vt/topo/decode.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package topo
+
+import (
+ "fmt"
+ "path"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+)
+
+// DecodeContent uses the filename to imply a type, and proto-decodes
+// the right object, then echoes it as a string.
+func DecodeContent(filename string, data []byte, json bool) (string, error) {
+ name := path.Base(filename)
+ dir := path.Dir(filename)
+ var p proto.Message
+ switch name {
+ case CellInfoFile:
+ p = new(topodatapb.CellInfo)
+ case KeyspaceFile:
+ p = new(topodatapb.Keyspace)
+ case ShardFile:
+ p = new(topodatapb.Shard)
+ case VSchemaFile:
+ p = new(vschemapb.Keyspace)
+ case ShardReplicationFile:
+ p = new(topodatapb.ShardReplication)
+ case TabletFile:
+ p = new(topodatapb.Tablet)
+ case SrvVSchemaFile:
+ p = new(vschemapb.SrvVSchema)
+ case SrvKeyspaceFile:
+ p = new(topodatapb.SrvKeyspace)
+ case RoutingRulesFile:
+ p = new(vschemapb.RoutingRules)
+ default:
+ switch dir {
+ case "/" + GetExternalVitessClusterDir():
+ p = new(topodatapb.ExternalVitessCluster)
+ default:
+ }
+ if p == nil {
+ if json {
+ return "", fmt.Errorf("unknown topo protobuf type for %v", name)
+ }
+ return string(data), nil
+ }
+ }
+
+ if err := proto.Unmarshal(data, p); err != nil {
+ return string(data), err
+ }
+
+ var marshalled []byte
+ var err error
+ if json {
+ marshalled, err = protojson.Marshal(p)
+ } else {
+ marshalled, err = prototext.Marshal(p)
+ }
+ return string(marshalled), err
+}
diff --git a/go/vt/topo/errors.go b/go/vt/topo/errors.go
index a645f1aa178..3be4b60b103 100644
--- a/go/vt/topo/errors.go
+++ b/go/vt/topo/errors.go
@@ -36,6 +36,7 @@ const (
NoUpdateNeeded
NoImplementation
NoReadOnlyImplementation
+ ResourceExhausted
)
// Error represents a topo error.
@@ -68,6 +69,8 @@ func NewError(code ErrorCode, node string) error {
message = fmt.Sprintf("no such topology implementation %s", node)
case NoReadOnlyImplementation:
message = fmt.Sprintf("no read-only topology implementation %s", node)
+ case ResourceExhausted:
+ message = fmt.Sprintf("server resource exhausted: %s", node)
default:
message = fmt.Sprintf("unknown code: %s", node)
}
diff --git a/go/vt/topo/etcd2topo/error.go b/go/vt/topo/etcd2topo/error.go
index e784fecd9b9..5e13d0bdf8d 100644
--- a/go/vt/topo/etcd2topo/error.go
+++ b/go/vt/topo/etcd2topo/error.go
@@ -45,7 +45,8 @@ func convertError(err error, nodePath string) error {
return nil
}
- if typeErr, ok := err.(rpctypes.EtcdError); ok {
+ var typeErr rpctypes.EtcdError
+ if errors.As(err, &typeErr) {
switch typeErr.Code() {
case codes.NotFound:
return topo.NewError(topo.NoNode, nodePath)
@@ -61,6 +62,8 @@ func convertError(err error, nodePath string) error {
// etcd primary election is failing, so timeout
// also sounds reasonable there.
return topo.NewError(topo.Timeout, nodePath)
+ case codes.ResourceExhausted:
+ return topo.NewError(topo.ResourceExhausted, nodePath)
}
return err
}
@@ -74,15 +77,17 @@ func convertError(err error, nodePath string) error {
return topo.NewError(topo.Interrupted, nodePath)
case codes.DeadlineExceeded:
return topo.NewError(topo.Timeout, nodePath)
+ case codes.ResourceExhausted:
+ return topo.NewError(topo.ResourceExhausted, nodePath)
default:
return err
}
}
- switch err {
- case context.Canceled:
+ switch {
+ case errors.Is(err, context.Canceled):
return topo.NewError(topo.Interrupted, nodePath)
- case context.DeadlineExceeded:
+ case errors.Is(err, context.DeadlineExceeded):
return topo.NewError(topo.Timeout, nodePath)
default:
return err
diff --git a/go/vt/topo/locks.go b/go/vt/topo/locks.go
index 5ee60c2ea2f..a9e71a6e7d6 100644
--- a/go/vt/topo/locks.go
+++ b/go/vt/topo/locks.go
@@ -27,6 +27,7 @@ import (
"github.com/spf13/pflag"
+ _flag "vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -38,15 +39,14 @@ import (
// keyspaces and shards.
var (
- // DefaultLockTimeout is a good value to use as a default for
- // locking a shard / keyspace.
- // Now used only for unlock operations
- defaultLockTimeout = 30 * time.Second
+ // LockTimeout is the maximum duration for which a
+ // shard / keyspace lock can be acquired.
+ LockTimeout = 45 * time.Second
// RemoteOperationTimeout is used for operations where we have to
// call out to another process.
// Used for RPC calls (including topo server calls)
- RemoteOperationTimeout = 30 * time.Second
+ RemoteOperationTimeout = 15 * time.Second
)
// Lock describes a long-running lock on a keyspace or a shard.
@@ -70,6 +70,7 @@ func init() {
func registerTopoLockFlags(fs *pflag.FlagSet) {
fs.DurationVar(&RemoteOperationTimeout, "remote_operation_timeout", RemoteOperationTimeout, "time to wait for a remote operation")
+ fs.DurationVar(&LockTimeout, "lock-timeout", LockTimeout, "Maximum time for which a shard/keyspace lock can be acquired for")
}
// newLock creates a new Lock.
@@ -244,7 +245,7 @@ func CheckKeyspaceLockedAndRenew(ctx context.Context, keyspace string) error {
func (l *Lock) lockKeyspace(ctx context.Context, ts *Server, keyspace string) (LockDescriptor, error) {
log.Infof("Locking keyspace %v for action %v", keyspace, l.Action)
- ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, getLockTimeout())
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.LockKeyspaceForAction")
@@ -265,10 +266,8 @@ func (l *Lock) unlockKeyspace(ctx context.Context, ts *Server, keyspace string,
// Detach from the parent timeout, but copy the trace span.
// We need to still release the lock even if the parent
// context timed out.
- // Note that we are not using the user provided RemoteOperationTimeout
- // here because it is possible that that timeout is too short.
ctx = trace.CopySpan(context.TODO(), ctx)
- ctx, cancel := context.WithTimeout(ctx, defaultLockTimeout)
+ ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.UnlockKeyspaceForAction")
@@ -385,7 +384,7 @@ func CheckShardLocked(ctx context.Context, keyspace, shard string) error {
func (l *Lock) lockShard(ctx context.Context, ts *Server, keyspace, shard string) (LockDescriptor, error) {
log.Infof("Locking shard %v/%v for action %v", keyspace, shard, l.Action)
- ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
+ ctx, cancel := context.WithTimeout(ctx, getLockTimeout())
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.LockShardForAction")
@@ -406,10 +405,8 @@ func (l *Lock) lockShard(ctx context.Context, ts *Server, keyspace, shard string
func (l *Lock) unlockShard(ctx context.Context, ts *Server, keyspace, shard string, lockDescriptor LockDescriptor, actionError error) error {
// Detach from the parent timeout, but copy the trace span.
// We need to still release the lock even if the parent context timed out.
- // Note that we are not using the user provided RemoteOperationTimeout
- // here because it is possible that that timeout is too short.
ctx = trace.CopySpan(context.TODO(), ctx)
- ctx, cancel := context.WithTimeout(ctx, defaultLockTimeout)
+ ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout)
defer cancel()
span, ctx := trace.NewSpan(ctx, "TopoServer.UnlockShardForAction")
@@ -428,3 +425,15 @@ func (l *Lock) unlockShard(ctx context.Context, ts *Server, keyspace, shard stri
}
return lockDescriptor.Unlock(ctx)
}
+
+// getLockTimeout is shim code used for backward compatibility with v15
+// This code can be removed in v17+ and LockTimeout can be used directly
+func getLockTimeout() time.Duration {
+ if _flag.IsFlagProvided("lock-timeout") {
+ return LockTimeout
+ }
+ if _flag.IsFlagProvided("remote_operation_timeout") {
+ return RemoteOperationTimeout
+ }
+ return LockTimeout
+}
diff --git a/go/vt/topo/locks_test.go b/go/vt/topo/locks_test.go
new file mode 100644
index 00000000000..da4f179f83c
--- /dev/null
+++ b/go/vt/topo/locks_test.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package topo
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/internal/flag"
+)
+
+// TestGetLockTimeout tests the behaviour of
+// getLockTimeout function in different situations where
+// the two flags `remote_operation_timeout` and `lock-timeout` are
+// provided or not.
+func TestGetLockTimeout(t *testing.T) {
+ tests := []struct {
+ description string
+ lockTimeoutValue string
+ remoteOperationTimeoutValue string
+ expectedLockTimeout time.Duration
+ }{
+ {
+ description: "no flags specified",
+ lockTimeoutValue: "",
+ remoteOperationTimeoutValue: "",
+ expectedLockTimeout: 45 * time.Second,
+ }, {
+ description: "lock-timeout flag specified",
+ lockTimeoutValue: "33s",
+ remoteOperationTimeoutValue: "",
+ expectedLockTimeout: 33 * time.Second,
+ }, {
+ description: "remote operation timeout flag specified",
+ lockTimeoutValue: "",
+ remoteOperationTimeoutValue: "33s",
+ expectedLockTimeout: 33 * time.Second,
+ }, {
+ description: "both flags specified",
+ lockTimeoutValue: "33s",
+ remoteOperationTimeoutValue: "22s",
+ expectedLockTimeout: 33 * time.Second,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.description, func(t *testing.T) {
+ var args []string
+ if tt.lockTimeoutValue != "" {
+ args = append(args, "--lock-timeout", tt.lockTimeoutValue)
+ }
+ if tt.remoteOperationTimeoutValue != "" {
+ args = append(args, "--remote_operation_timeout", tt.remoteOperationTimeoutValue)
+ }
+ os.Args = os.Args[0:1]
+ os.Args = append(os.Args, args...)
+
+ fs := pflag.NewFlagSet("test", pflag.ExitOnError)
+ registerTopoLockFlags(fs)
+ flag.Parse(fs)
+
+ val := getLockTimeout()
+ require.Equal(t, tt.expectedLockTimeout, val)
+ })
+ }
+
+}
diff --git a/go/vt/topo/memorytopo/file.go b/go/vt/topo/memorytopo/file.go
index 0abfc56cb80..e45d2b23ee4 100644
--- a/go/vt/topo/memorytopo/file.go
+++ b/go/vt/topo/memorytopo/file.go
@@ -187,6 +187,9 @@ func (c *Conn) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo,
if c.factory.err != nil {
return nil, c.factory.err
}
+ if c.factory.listErr != nil {
+ return nil, c.factory.listErr
+ }
dir, file := path.Split(filePathPrefix)
// Get the node to list.
diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go
index cdad2ddbcdd..0aa066054f4 100644
--- a/go/vt/topo/memorytopo/memorytopo.go
+++ b/go/vt/topo/memorytopo/memorytopo.go
@@ -75,6 +75,9 @@ type Factory struct {
// err is used for testing purposes to force queries / watches
// to return the given error
err error
+ // listErr is used for testing purposes to fake errors from
+ // calls to List.
+ listErr error
}
// HasGlobalReadOnlyCell is part of the topo.Factory interface.
@@ -343,6 +346,13 @@ func (f *Factory) recursiveDelete(n *node) {
}
}
+func (f *Factory) SetListError(err error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ f.listErr = err
+}
+
func init() {
rand.Seed(time.Now().UnixNano())
}
diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go
index 9ca9b2d3322..20af5c624a2 100644
--- a/go/vt/topo/server.go
+++ b/go/vt/topo/server.go
@@ -173,7 +173,7 @@ var (
cellsToAliases: make(map[string]string),
}
- FlagBinaries = []string{"vttablet", "vtctl", "vtctld", "vtcombo", "vtexplain", "vtgate",
+ FlagBinaries = []string{"vttablet", "vtctl", "vtctld", "vtcombo", "vtgate",
"vtgr", "vtorc", "vtbackup"}
)
diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go
index 7f03bf13364..b8e9344109d 100644
--- a/go/vt/topo/shard.go
+++ b/go/vt/topo/shard.go
@@ -635,7 +635,7 @@ func (ts *Server) GetTabletMapForShardByCell(ctx context.Context, keyspace, shar
// get the tablets for the cells we were able to reach, forward
// ErrPartialResult from FindAllTabletAliasesInShard
- result, gerr := ts.GetTabletMap(ctx, aliases)
+ result, gerr := ts.GetTabletMap(ctx, aliases, nil)
if gerr == nil && err != nil {
gerr = err
}
diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go
index 619b67489e4..fda832a8286 100644
--- a/go/vt/topo/tablet.go
+++ b/go/vt/topo/tablet.go
@@ -24,6 +24,8 @@ import (
"sync"
"time"
+ "golang.org/x/sync/semaphore"
+
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -285,10 +287,17 @@ func (ts *Server) GetTabletAliasesByCell(ctx context.Context, cell string) ([]*t
return result, nil
}
+// GetTabletsByCellOptions controls the behavior of
+// Server.GetTabletsByCell.
+type GetTabletsByCellOptions struct {
+ // Concurrency controls the maximum number of concurrent calls to GetTablet.
+ Concurrency int64
+}
+
// GetTabletsByCell returns all the tablets in the cell.
// It returns ErrNoNode if the cell doesn't exist.
// It returns (nil, nil) if the cell exists, but there are no tablets in it.
-func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string) ([]*TabletInfo, error) {
+func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string, opt *GetTabletsByCellOptions) ([]*TabletInfo, error) {
// If the cell doesn't exist, this will return ErrNoNode.
cellConn, err := ts.ConnForCell(ctx, cellAlias)
if err != nil {
@@ -296,10 +305,12 @@ func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string) ([]*Ta
}
listResults, err := cellConn.List(ctx, TabletsPath)
if err != nil || len(listResults) == 0 {
- // Currently the ZooKeeper and Memory topo implementations do not support scans
+ // Currently the ZooKeeper implementation does not support scans
// so we fall back to the more costly method of fetching the tablets one by one.
- if IsErrType(err, NoImplementation) {
- return ts.GetTabletsIndividuallyByCell(ctx, cellAlias)
+ // In the etcd case, it is possible that the response is too large. We also fall
+ // back to fetching the tablets one by one in that case.
+ if IsErrType(err, NoImplementation) || IsErrType(err, ResourceExhausted) {
+ return ts.GetTabletsIndividuallyByCell(ctx, cellAlias, opt)
}
if IsErrType(err, NoNode) {
return nil, nil
@@ -323,7 +334,7 @@ func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string) ([]*Ta
// directly support the topoConn.List() functionality.
// It returns ErrNoNode if the cell doesn't exist.
// It returns (nil, nil) if the cell exists, but there are no tablets in it.
-func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string) ([]*TabletInfo, error) {
+func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string, opt *GetTabletsByCellOptions) ([]*TabletInfo, error) {
// If the cell doesn't exist, this will return ErrNoNode.
aliases, err := ts.GetTabletAliasesByCell(ctx, cell)
if err != nil {
@@ -331,7 +342,7 @@ func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string)
}
sort.Sort(topoproto.TabletAliasList(aliases))
- tabletMap, err := ts.GetTabletMap(ctx, aliases)
+ tabletMap, err := ts.GetTabletMap(ctx, aliases, opt)
if err != nil {
// we got another error than topo.ErrNoNode
return nil, err
@@ -506,41 +517,62 @@ func DeleteTabletReplicationData(ctx context.Context, ts *Server, tablet *topoda
}
// GetTabletMap tries to read all the tablets in the provided list,
-// and returns them all in a map.
-// If error is ErrPartialResult, the results in the dictionary are
+// and returns them in a map.
+// If error is ErrPartialResult, the results in the map are
// incomplete, meaning some tablets couldn't be read.
// The map is indexed by topoproto.TabletAliasString(tablet alias).
-func (ts *Server) GetTabletMap(ctx context.Context, tabletAliases []*topodatapb.TabletAlias) (map[string]*TabletInfo, error) {
+func (ts *Server) GetTabletMap(ctx context.Context, tabletAliases []*topodatapb.TabletAlias, opt *GetTabletsByCellOptions) (map[string]*TabletInfo, error) {
span, ctx := trace.NewSpan(ctx, "topo.GetTabletMap")
span.Annotate("num_tablets", len(tabletAliases))
defer span.Finish()
- wg := sync.WaitGroup{}
- mutex := sync.Mutex{}
+ var (
+ mu sync.Mutex
+ wg sync.WaitGroup
+ tabletMap = make(map[string]*TabletInfo)
+ returnErr error
+ // Previously this was always run with unlimited concurrency, so 32 should be fine.
+ concurrency int64 = 32
+ )
- tabletMap := make(map[string]*TabletInfo)
- var someError error
+ if opt != nil && opt.Concurrency > 0 {
+ concurrency = opt.Concurrency
+ }
+ var sem = semaphore.NewWeighted(concurrency)
for _, tabletAlias := range tabletAliases {
wg.Add(1)
go func(tabletAlias *topodatapb.TabletAlias) {
defer wg.Done()
+ if err := sem.Acquire(ctx, 1); err != nil {
+ // Only happens if context is cancelled.
+ mu.Lock()
+ defer mu.Unlock()
+ log.Warningf("%v: %v", tabletAlias, err)
+ // We only need to set this on the first error.
+ if returnErr == nil {
+ returnErr = NewError(PartialResult, tabletAlias.GetCell())
+ }
+ return
+ }
tabletInfo, err := ts.GetTablet(ctx, tabletAlias)
- mutex.Lock()
+ sem.Release(1)
+ mu.Lock()
+ defer mu.Unlock()
if err != nil {
log.Warningf("%v: %v", tabletAlias, err)
// There can be data races removing nodes - ignore them for now.
- if !IsErrType(err, NoNode) {
- someError = NewError(PartialResult, "")
+ // We only need to set this on first error.
+ if returnErr == nil && !IsErrType(err, NoNode) {
+ returnErr = NewError(PartialResult, tabletAlias.GetCell())
}
} else {
tabletMap[topoproto.TabletAliasString(tabletAlias)] = tabletInfo
}
- mutex.Unlock()
}(tabletAlias)
}
wg.Wait()
- return tabletMap, someError
+ return tabletMap, returnErr
}
// InitTablet creates or updates a tablet. If no parent is specified
diff --git a/go/vt/topo/tablet_test.go b/go/vt/topo/tablet_test.go
new file mode 100644
index 00000000000..1f94fd62a47
--- /dev/null
+++ b/go/vt/topo/tablet_test.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2023 The Vitess Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package topo_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+)
+
+// Test various cases of calls to GetTabletsByCell.
+// GetTabletsByCell first tries to get all the tablets using List.
+// If the response is too large, we will get an error, and fall back to one tablet at a time.
+func TestServerGetTabletsByCell(t *testing.T) {
+ tests := []struct {
+ name string
+ tablets int
+ opt *topo.GetTabletsByCellOptions
+ listError error
+ }{
+ {
+ name: "negative concurrency",
+ tablets: 1,
+ // Ensure this doesn't panic.
+ opt: &topo.GetTabletsByCellOptions{Concurrency: -1},
+ },
+ {
+ name: "single",
+ tablets: 1,
+ // Make sure the defaults apply as expected.
+ opt: nil,
+ },
+ {
+ name: "multiple",
+ // should work with more than 1 tablet
+ tablets: 32,
+ opt: &topo.GetTabletsByCellOptions{Concurrency: 8},
+ },
+ {
+ name: "multiple with list error",
+ // should work with more than 1 tablet when List returns an error
+ tablets: 32,
+ opt: &topo.GetTabletsByCellOptions{Concurrency: 8},
+ listError: topo.NewError(topo.ResourceExhausted, ""),
+ },
+ }
+
+ const cell = "zone1"
+ const keyspace = "keyspace"
+ const shard = "shard"
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ ts, factory := memorytopo.NewServerAndFactory(cell)
+ defer ts.Close()
+ if tt.listError != nil {
+ factory.SetListError(tt.listError)
+ }
+
+ // Create an ephemeral keyspace and generate shard records within
+ // the keyspace to fetch later.
+ require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}))
+ require.NoError(t, ts.CreateShard(ctx, keyspace, shard))
+
+ tablets := make([]*topo.TabletInfo, tt.tablets)
+
+ for i := 0; i < tt.tablets; i++ {
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: cell,
+ Uid: uint32(i),
+ },
+ Hostname: "host1",
+ PortMap: map[string]int32{
+ "vt": int32(i),
+ },
+ Keyspace: keyspace,
+ Shard: shard,
+ }
+ tInfo := &topo.TabletInfo{Tablet: tablet}
+ tablets[i] = tInfo
+ require.NoError(t, ts.CreateTablet(ctx, tablet))
+ }
+
+ // Verify that we return a complete list of tablets and that each
+ // tablet matches what we expect.
+ out, err := ts.GetTabletsByCell(ctx, cell, tt.opt)
+ require.NoError(t, err)
+ require.Len(t, out, tt.tablets)
+
+ for i, tab := range tablets {
+ require.Equal(t, tab.Tablet, tablets[i].Tablet)
+ }
+ })
+ }
+}
diff --git a/go/vt/topo/topoproto/flag.go b/go/vt/topo/topoproto/flag.go
index d92378017ae..becc789b59f 100644
--- a/go/vt/topo/topoproto/flag.go
+++ b/go/vt/topo/topoproto/flag.go
@@ -39,7 +39,7 @@ func (ttlv *TabletTypeListFlag) Set(v string) (err error) {
// Type is part of the pflag.Value interface.
func (ttlv *TabletTypeListFlag) Type() string {
- return "[]topodatapb.TabletType"
+ return "strings"
}
// TabletTypeFlag implements the pflag.Value interface, for parsing a command-line value into a TabletType.
diff --git a/go/vt/topo/topoproto/srvkeyspace.go b/go/vt/topo/topoproto/srvkeyspace.go
index 24618233fb2..cdd0ea20d27 100644
--- a/go/vt/topo/topoproto/srvkeyspace.go
+++ b/go/vt/topo/topoproto/srvkeyspace.go
@@ -51,6 +51,9 @@ func (sra ShardReferenceArray) Sort() { sort.Sort(sra) }
// SrvKeyspaceGetPartition returns a Partition for the given tablet type,
// or nil if it's not there.
func SrvKeyspaceGetPartition(sk *topodatapb.SrvKeyspace, tabletType topodatapb.TabletType) *topodatapb.SrvKeyspace_KeyspacePartition {
+ if sk == nil {
+ return nil
+ }
for _, p := range sk.Partitions {
if p.ServedType == tabletType {
return p
diff --git a/go/vt/topo/zk2topo/error.go b/go/vt/topo/zk2topo/error.go
index 1ebc3896f40..1149ad60bf3 100644
--- a/go/vt/topo/zk2topo/error.go
+++ b/go/vt/topo/zk2topo/error.go
@@ -18,6 +18,7 @@ package zk2topo
import (
"context"
+ "errors"
"github.com/z-division/go-zookeeper/zk"
@@ -26,20 +27,20 @@ import (
// Error codes returned by the zookeeper Go client:
func convertError(err error, node string) error {
- switch err {
- case zk.ErrBadVersion:
+ switch {
+ case errors.Is(err, zk.ErrBadVersion):
return topo.NewError(topo.BadVersion, node)
- case zk.ErrNoNode:
+ case errors.Is(err, zk.ErrNoNode):
return topo.NewError(topo.NoNode, node)
- case zk.ErrNodeExists:
+ case errors.Is(err, zk.ErrNodeExists):
return topo.NewError(topo.NodeExists, node)
- case zk.ErrNotEmpty:
+ case errors.Is(err, zk.ErrNotEmpty):
return topo.NewError(topo.NodeNotEmpty, node)
- case zk.ErrSessionExpired:
+ case errors.Is(err, zk.ErrSessionExpired):
return topo.NewError(topo.Timeout, node)
- case context.Canceled:
+ case errors.Is(err, context.Canceled):
return topo.NewError(topo.Interrupted, node)
- case context.DeadlineExceeded:
+ case errors.Is(err, context.DeadlineExceeded):
return topo.NewError(topo.Timeout, node)
}
return err
diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go
index af6f4b3c3c6..59d9088be9e 100644
--- a/go/vt/topotools/tablet.go
+++ b/go/vt/topotools/tablet.go
@@ -127,7 +127,7 @@ func DoCellsHaveRdonlyTablets(ctx context.Context, ts *topo.Server, cells []stri
}
for _, cell := range cells {
- tablets, err := ts.GetTabletsByCell(ctx, cell)
+ tablets, err := ts.GetTabletsByCell(ctx, cell, nil)
if err != nil {
return false, err
}
diff --git a/go/vt/topotools/utils.go b/go/vt/topotools/utils.go
index 6b618383a1e..6d1522e04e7 100644
--- a/go/vt/topotools/utils.go
+++ b/go/vt/topotools/utils.go
@@ -43,7 +43,7 @@ func GetTabletMapForCell(ctx context.Context, ts *topo.Server, cell string) (map
if err != nil {
return nil, err
}
- tabletMap, err := ts.GetTabletMap(ctx, aliases)
+ tabletMap, err := ts.GetTabletMap(ctx, aliases, nil)
if err != nil {
// we got another error than topo.ErrNoNode
return nil, err
@@ -65,7 +65,7 @@ func GetAllTabletsAcrossCells(ctx context.Context, ts *topo.Server) ([]*topo.Tab
wg.Add(len(cells))
for i, cell := range cells {
go func(i int, cell string) {
- results[i], errors[i] = ts.GetTabletsByCell(ctx, cell)
+ results[i], errors[i] = ts.GetTabletsByCell(ctx, cell, nil)
wg.Done()
}(i, cell)
}
diff --git a/go/vt/topotools/vschema_ddl.go b/go/vt/topotools/vschema_ddl.go
index 30cce3bd5db..e8da2734b4f 100644
--- a/go/vt/topotools/vschema_ddl.go
+++ b/go/vt/topotools/vschema_ddl.go
@@ -17,7 +17,6 @@ limitations under the License.
package topotools
import (
- "fmt"
"reflect"
"vitess.io/vitess/go/vt/sqlparser"
@@ -226,15 +225,9 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, alterVschema *sqlpar
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema already contains auto inc %v on table %s in keyspace %s", table.AutoIncrement, name, ksName)
}
- sequence := alterVschema.AutoIncSpec.Sequence
- sequenceFqn := sequence.Name.String()
- if sequence.Qualifier.String() != "" {
- sequenceFqn = fmt.Sprintf("%s.%s", sequence.Qualifier.String(), sequenceFqn)
- }
-
table.AutoIncrement = &vschemapb.AutoIncrement{
Column: alterVschema.AutoIncSpec.Column.String(),
- Sequence: sequenceFqn,
+ Sequence: sqlparser.String(alterVschema.AutoIncSpec.Sequence),
}
return ks, nil
diff --git a/go/vt/vitessdriver/rows.go b/go/vt/vitessdriver/rows.go
index d2ace7bdfad..a2438bb891c 100644
--- a/go/vt/vitessdriver/rows.go
+++ b/go/vt/vitessdriver/rows.go
@@ -17,10 +17,14 @@ limitations under the License.
package vitessdriver
import (
+ "database/sql"
"database/sql/driver"
"io"
+ "reflect"
+ "time"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/proto/query"
)
// rows creates a database/sql/driver compliant Row iterator
@@ -58,3 +62,60 @@ func (ri *rows) Next(dest []driver.Value) error {
ri.index++
return nil
}
+
+var (
+ typeInt8 = reflect.TypeOf(int8(0))
+ typeUint8 = reflect.TypeOf(uint8(0))
+ typeInt16 = reflect.TypeOf(int16(0))
+ typeUint16 = reflect.TypeOf(uint16(0))
+ typeInt32 = reflect.TypeOf(int32(0))
+ typeUint32 = reflect.TypeOf(uint32(0))
+ typeInt64 = reflect.TypeOf(int64(0))
+ typeUint64 = reflect.TypeOf(uint64(0))
+ typeFloat32 = reflect.TypeOf(float32(0))
+ typeFloat64 = reflect.TypeOf(float64(0))
+ typeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ typeTime = reflect.TypeOf(time.Time{})
+ typeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+// Implements the RowsColumnTypeScanType interface
+func (ri *rows) ColumnTypeScanType(index int) reflect.Type {
+ field := ri.qr.Fields[index]
+ switch field.GetType() {
+ case query.Type_INT8:
+ return typeInt8
+ case query.Type_UINT8:
+ return typeUint8
+ case query.Type_INT16, query.Type_YEAR:
+ return typeInt16
+ case query.Type_UINT16:
+ return typeUint16
+ case query.Type_INT24:
+ return typeInt32
+ case query.Type_UINT24: // no 24 bit type, using 32 instead
+ return typeUint32
+ case query.Type_INT32:
+ return typeInt32
+ case query.Type_UINT32:
+ return typeUint32
+ case query.Type_INT64:
+ return typeInt64
+ case query.Type_UINT64:
+ return typeUint64
+ case query.Type_FLOAT32:
+ return typeFloat32
+ case query.Type_FLOAT64:
+ return typeFloat64
+ case query.Type_TIMESTAMP, query.Type_DECIMAL, query.Type_VARCHAR, query.Type_TEXT,
+ query.Type_BLOB, query.Type_VARBINARY, query.Type_CHAR, query.Type_BINARY, query.Type_BIT,
+ query.Type_ENUM, query.Type_SET, query.Type_TUPLE, query.Type_GEOMETRY, query.Type_JSON,
+ query.Type_HEXNUM, query.Type_HEXVAL, query.Type_BITNUM:
+
+ return typeRawBytes
+ case query.Type_DATE, query.Type_TIME, query.Type_DATETIME:
+ return typeTime
+ default:
+ return typeUnknown
+ }
+}
diff --git a/go/vt/vitessdriver/rows_test.go b/go/vt/vitessdriver/rows_test.go
index fdfc478ad16..13584e70dd8 100644
--- a/go/vt/vitessdriver/rows_test.go
+++ b/go/vt/vitessdriver/rows_test.go
@@ -18,10 +18,12 @@ package vitessdriver
import (
"database/sql/driver"
+ "fmt"
"io"
"reflect"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/sqltypes"
@@ -135,3 +137,92 @@ func TestRows(t *testing.T) {
_ = ri.Close()
}
+
+// Test that the ColumnTypeScanType function returns the correct reflection type for each
+// sql type. The sql type in turn comes from a table column's type.
+func TestColumnTypeScanType(t *testing.T) {
+ var r = sqltypes.Result{
+ Fields: []*querypb.Field{
+ {
+ Name: "field1",
+ Type: sqltypes.Int8,
+ },
+ {
+ Name: "field2",
+ Type: sqltypes.Uint8,
+ },
+ {
+ Name: "field3",
+ Type: sqltypes.Int16,
+ },
+ {
+ Name: "field4",
+ Type: sqltypes.Uint16,
+ },
+ {
+ Name: "field5",
+ Type: sqltypes.Int24,
+ },
+ {
+ Name: "field6",
+ Type: sqltypes.Uint24,
+ },
+ {
+ Name: "field7",
+ Type: sqltypes.Int32,
+ },
+ {
+ Name: "field8",
+ Type: sqltypes.Uint32,
+ },
+ {
+ Name: "field9",
+ Type: sqltypes.Int64,
+ },
+ {
+ Name: "field10",
+ Type: sqltypes.Uint64,
+ },
+ {
+ Name: "field11",
+ Type: sqltypes.Float32,
+ },
+ {
+ Name: "field12",
+ Type: sqltypes.Float64,
+ },
+ {
+ Name: "field13",
+ Type: sqltypes.VarBinary,
+ },
+ {
+ Name: "field14",
+ Type: sqltypes.Datetime,
+ },
+ },
+ }
+
+ ri := newRows(&r, &converter{}).(driver.RowsColumnTypeScanType)
+ defer ri.Close()
+
+ wantTypes := []reflect.Type{
+ typeInt8,
+ typeUint8,
+ typeInt16,
+ typeUint16,
+ typeInt32,
+ typeUint32,
+ typeInt32,
+ typeUint32,
+ typeInt64,
+ typeUint64,
+ typeFloat32,
+ typeFloat64,
+ typeRawBytes,
+ typeTime,
+ }
+
+ for i := 0; i < len(wantTypes); i++ {
+ assert.Equal(t, ri.ColumnTypeScanType(i), wantTypes[i], fmt.Sprintf("unexpected type %v, wanted %v", ri.ColumnTypeScanType(i), wantTypes[i]))
+ }
+}
diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go
index 2ebe638a457..714f64a59cd 100644
--- a/go/vt/vtadmin/api.go
+++ b/go/vt/vtadmin/api.go
@@ -341,6 +341,8 @@ func (api *API) Handler() http.Handler {
router.HandleFunc("/cells", httpAPI.Adapt(vtadminhttp.GetCellInfos)).Name("API.GetCellInfos")
router.HandleFunc("/cells_aliases", httpAPI.Adapt(vtadminhttp.GetCellsAliases)).Name("API.GetCellsAliases")
router.HandleFunc("/clusters", httpAPI.Adapt(vtadminhttp.GetClusters)).Name("API.GetClusters")
+ router.HandleFunc("/cluster/{cluster_id}/validate", httpAPI.Adapt(vtadminhttp.Validate)).Name("API.Validate").Methods("PUT", "OPTIONS")
+ router.HandleFunc("/cluster/{cluster_id}/topology", httpAPI.Adapt(vtadminhttp.GetTopologyPath)).Name("API.GetTopologyPath")
router.HandleFunc("/gates", httpAPI.Adapt(vtadminhttp.GetGates)).Name("API.GetGates")
router.HandleFunc("/keyspace/{cluster_id}", httpAPI.Adapt(vtadminhttp.CreateKeyspace)).Name("API.CreateKeyspace").Methods("POST")
router.HandleFunc("/keyspace/{cluster_id}/{name}", httpAPI.Adapt(vtadminhttp.DeleteKeyspace)).Name("API.DeleteKeyspace").Methods("DELETE")
@@ -358,6 +360,8 @@ func (api *API) Handler() http.Handler {
router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/emergency_failover", httpAPI.Adapt(vtadminhttp.EmergencyFailoverShard)).Name("API.EmergencyFailoverShard").Methods("POST")
router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/planned_failover", httpAPI.Adapt(vtadminhttp.PlannedFailoverShard)).Name("API.PlannedFailoverShard").Methods("POST")
router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/reload_schema_shard", httpAPI.Adapt(vtadminhttp.ReloadSchemaShard)).Name("API.ReloadSchemaShard").Methods("PUT", "OPTIONS")
+ router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/validate", httpAPI.Adapt(vtadminhttp.ValidateShard)).Name("API.ValidateShard").Methods("PUT", "OPTIONS")
+ router.HandleFunc("/shard/{cluster_id}/{keyspace}/{shard}/validate_version", httpAPI.Adapt(vtadminhttp.ValidateVersionShard)).Name("API.ValidateVersionShard").Methods("PUT", "OPTIONS")
router.HandleFunc("/shard_replication_positions", httpAPI.Adapt(vtadminhttp.GetShardReplicationPositions)).Name("API.GetShardReplicationPositions")
router.HandleFunc("/shards/{cluster_id}", httpAPI.Adapt(vtadminhttp.CreateShard)).Name("API.CreateShard").Methods("POST")
router.HandleFunc("/shards/{cluster_id}", httpAPI.Adapt(vtadminhttp.DeleteShards)).Name("API.DeleteShards").Methods("DELETE")
@@ -366,6 +370,7 @@ func (api *API) Handler() http.Handler {
router.HandleFunc("/tablets", httpAPI.Adapt(vtadminhttp.GetTablets)).Name("API.GetTablets")
router.HandleFunc("/tablet/{tablet}", httpAPI.Adapt(vtadminhttp.GetTablet)).Name("API.GetTablet").Methods("GET")
router.HandleFunc("/tablet/{tablet}", httpAPI.Adapt(vtadminhttp.DeleteTablet)).Name("API.DeleteTablet").Methods("DELETE", "OPTIONS")
+ router.HandleFunc("/tablet/{tablet}/full_status", httpAPI.Adapt(vtadminhttp.GetFullStatus)).Name("API.GetFullStatus").Methods("GET")
router.HandleFunc("/tablet/{tablet}/healthcheck", httpAPI.Adapt(vtadminhttp.RunHealthCheck)).Name("API.RunHealthCheck")
router.HandleFunc("/tablet/{tablet}/ping", httpAPI.Adapt(vtadminhttp.PingTablet)).Name("API.PingTablet")
router.HandleFunc("/tablet/{tablet}/refresh", httpAPI.Adapt(vtadminhttp.RefreshState)).Name("API.RefreshState").Methods("PUT", "OPTIONS")
@@ -771,6 +776,25 @@ func (api *API) GetClusters(ctx context.Context, req *vtadminpb.GetClustersReque
}, nil
}
+// GetFullStatus is part of the vtadminpb.VTAdminServer interface.
+func (api *API) GetFullStatus(ctx context.Context, req *vtadminpb.GetFullStatusRequest) (*vtctldatapb.GetFullStatusResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.GetFullStatus")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.TabletFullStatusResource, rbac.GetAction) {
+ return nil, nil
+ }
+
+ return c.Vtctld.GetFullStatus(ctx, &vtctldatapb.GetFullStatusRequest{
+ TabletAlias: req.Alias,
+ })
+}
+
// GetGates is part of the vtadminpb.VTAdminServer interface.
func (api *API) GetGates(ctx context.Context, req *vtadminpb.GetGatesRequest) (*vtadminpb.GetGatesResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.GetGates")
@@ -1147,6 +1171,25 @@ func (api *API) GetTablets(ctx context.Context, req *vtadminpb.GetTabletsRequest
}, nil
}
+// GetTopologyPath is part of the vtadminpb.VTAdminServer interface.
+func (api *API) GetTopologyPath(ctx context.Context, req *vtadminpb.GetTopologyPathRequest) (*vtctldatapb.GetTopologyPathResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.GetTopologyPath")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ cluster.AnnotateSpan(c, span)
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.TopologyResource, rbac.GetAction) {
+ return nil, nil
+ }
+
+ return c.Vtctld.GetTopologyPath(ctx, &vtctldatapb.GetTopologyPathRequest{Path: req.Path})
+}
+
// GetVSchema is part of the vtadminpb.VTAdminServer interface.
func (api *API) GetVSchema(ctx context.Context, req *vtadminpb.GetVSchemaRequest) (*vtadminpb.VSchema, error) {
span, ctx := trace.NewSpan(ctx, "API.GetVSchema")
@@ -1714,6 +1757,31 @@ func (api *API) TabletExternallyPromoted(ctx context.Context, req *vtadminpb.Tab
return c.TabletExternallyPromoted(ctx, tablet)
}
+// Validate is part of the vtadminpb.VTAdminServer interface.
+func (api *API) Validate(ctx context.Context, req *vtadminpb.ValidateRequest) (*vtctldatapb.ValidateResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.Validate")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.ClusterResource, rbac.PutAction) {
+ return nil, nil
+ }
+
+ res, err := c.Vtctld.Validate(ctx, &vtctldatapb.ValidateRequest{
+ PingTablets: req.PingTablets,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
// ValidateKeyspace is part of the vtadminpb.VTAdminServer interface.
func (api *API) ValidateKeyspace(ctx context.Context, req *vtadminpb.ValidateKeyspaceRequest) (*vtctldatapb.ValidateKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.ValidateKeyspace")
@@ -1765,6 +1833,33 @@ func (api *API) ValidateSchemaKeyspace(ctx context.Context, req *vtadminpb.Valid
return res, nil
}
+// ValidateShard is part of the vtadminpb.VTAdminServer interface.
+func (api *API) ValidateShard(ctx context.Context, req *vtadminpb.ValidateShardRequest) (*vtctldatapb.ValidateShardResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.ValidateShard")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.ShardResource, rbac.PutAction) {
+ return nil, nil
+ }
+
+ res, err := c.Vtctld.ValidateShard(ctx, &vtctldatapb.ValidateShardRequest{
+ Keyspace: req.Keyspace,
+ Shard: req.Shard,
+ PingTablets: req.PingTablets,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
// ValidateVersionKeyspace is part of the vtadminpb.VTAdminServer interface.
func (api *API) ValidateVersionKeyspace(ctx context.Context, req *vtadminpb.ValidateVersionKeyspaceRequest) (*vtctldatapb.ValidateVersionKeyspaceResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.ValidateVersionKeyspace")
@@ -1790,6 +1885,32 @@ func (api *API) ValidateVersionKeyspace(ctx context.Context, req *vtadminpb.Vali
return res, nil
}
+// ValidateVersionShard is part of the vtadminpb.VTAdminServer interface.
+func (api *API) ValidateVersionShard(ctx context.Context, req *vtadminpb.ValidateVersionShardRequest) (*vtctldatapb.ValidateVersionShardResponse, error) {
+ span, ctx := trace.NewSpan(ctx, "API.ValidateVersionShard")
+ defer span.Finish()
+
+ c, err := api.getClusterForRequest(req.ClusterId)
+ if err != nil {
+ return nil, err
+ }
+
+ if !api.authz.IsAuthorized(ctx, c.ID, rbac.ShardResource, rbac.PutAction) {
+ return nil, nil
+ }
+
+ res, err := c.Vtctld.ValidateVersionShard(ctx, &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: req.Keyspace,
+ Shard: req.Shard,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
// VTExplain is part of the vtadminpb.VTAdminServer interface.
func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) (*vtadminpb.VTExplainResponse, error) {
span, ctx := trace.NewSpan(ctx, "API.VTExplain")
diff --git a/go/vt/vtadmin/api_authz_test.go b/go/vt/vtadmin/api_authz_test.go
index 36edfee1819..45d3e443c6e 100644
--- a/go/vt/vtadmin/api_authz_test.go
+++ b/go/vt/vtadmin/api_authz_test.go
@@ -3209,7 +3209,8 @@ func testClusters(t testing.TB) []*cluster.Cluster {
Keyspace: "test",
Name: "-",
Shard: &topodatapb.Shard{
- KeyRange: &topodatapb.KeyRange{},
+ KeyRange: &topodatapb.KeyRange{},
+ IsPrimaryServing: true,
},
},
},
diff --git a/go/vt/vtadmin/http/clusters.go b/go/vt/vtadmin/http/clusters.go
index ff02719679a..34744025d6e 100644
--- a/go/vt/vtadmin/http/clusters.go
+++ b/go/vt/vtadmin/http/clusters.go
@@ -18,8 +18,12 @@ package http
import (
"context"
+ "encoding/json"
+
+ "github.com/gorilla/mux"
vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin"
+ "vitess.io/vitess/go/vt/vtadmin/errors"
)
// GetClusters implements the http wrapper for /clusters
@@ -27,3 +31,41 @@ func GetClusters(ctx context.Context, r Request, api *API) *JSONResponse {
clusters, err := api.server.GetClusters(ctx, &vtadminpb.GetClustersRequest{})
return NewJSONResponse(clusters, err)
}
+
+// Validate implements the http wrapper for /cluster/{cluster_id}/validate
+func Validate(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := mux.Vars(r.Request)
+ decoder := json.NewDecoder(r.Body)
+ defer r.Body.Close()
+
+ var result struct {
+ PingTablets bool `json:"pingTablets"`
+ }
+
+ if err := decoder.Decode(&result); err != nil {
+ return NewJSONResponse(nil, &errors.BadRequest{
+ Err: err,
+ })
+ }
+
+ resp, err := api.server.Validate(ctx, &vtadminpb.ValidateRequest{
+ ClusterId: vars["cluster_id"],
+ PingTablets: result.PingTablets,
+ })
+ return NewJSONResponse(resp, err)
+}
+
+// GetTopologyPath implements the http wrapper for /cluster/{cluster_id}/topology
+//
+// Query params:
+// - path: string
+func GetTopologyPath(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := r.Vars()
+ query := r.URL.Query()
+
+ result, err := api.server.GetTopologyPath(ctx, &vtadminpb.GetTopologyPathRequest{
+ ClusterId: vars["cluster_id"],
+ Path: query["path"][0],
+ })
+ return NewJSONResponse(result, err)
+}
diff --git a/go/vt/vtadmin/http/shards.go b/go/vt/vtadmin/http/shards.go
index b0555f70f13..81cb1299913 100644
--- a/go/vt/vtadmin/http/shards.go
+++ b/go/vt/vtadmin/http/shards.go
@@ -199,3 +199,54 @@ func ReloadSchemaShard(ctx context.Context, r Request, api *API) *JSONResponse {
})
return NewJSONResponse(result, err)
}
+
+// ValidateShard implements the http wrapper for
+// PUT /shard/{cluster_id}/{keyspace}/{shard}/validate
+//
+// Query params: none
+//
+// Body params:
+// - ping_tablets: bool
+func ValidateShard(ctx context.Context, r Request, api *API) *JSONResponse {
+ decoder := json.NewDecoder(r.Body)
+ defer r.Body.Close()
+
+ var params struct {
+ PingTablets bool `json:"ping_tablets"`
+ }
+
+ if err := decoder.Decode(¶ms); err != nil {
+ return NewJSONResponse(nil, &errors.BadRequest{
+ Err: err,
+ })
+ }
+
+ vars := r.Vars()
+
+ result, err := api.server.ValidateShard(ctx, &vtadminpb.ValidateShardRequest{
+ ClusterId: vars["cluster_id"],
+ Keyspace: vars["keyspace"],
+ Shard: vars["shard"],
+ PingTablets: params.PingTablets,
+ })
+
+ return NewJSONResponse(result, err)
+}
+
+// ValidateVersionShard implements the http wrapper for
+// PUT /shard/{cluster_id}/{keyspace}/{shard}/validate_version
+//
+// Query params: none
+//
+// Body params: none
+func ValidateVersionShard(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := r.Vars()
+
+ result, err := api.server.ValidateVersionShard(ctx, &vtadminpb.ValidateVersionShardRequest{
+ ClusterId: vars["cluster_id"],
+ Keyspace: vars["keyspace"],
+ Shard: vars["shard"],
+ })
+
+ return NewJSONResponse(result, err)
+}
diff --git a/go/vt/vtadmin/http/tablets.go b/go/vt/vtadmin/http/tablets.go
index 322092d8b97..b812fd1aebb 100644
--- a/go/vt/vtadmin/http/tablets.go
+++ b/go/vt/vtadmin/http/tablets.go
@@ -22,6 +22,22 @@ import (
vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin"
)
+// GetFullStatus implements the http wrapper for /tablets/{tablet}/full_status
+func GetFullStatus(ctx context.Context, r Request, api *API) *JSONResponse {
+ vars := r.Vars()
+
+ alias, err := vars.GetTabletAlias("tablet")
+ if err != nil {
+ return NewJSONResponse(nil, err)
+ }
+ status, err := api.server.GetFullStatus(ctx, &vtadminpb.GetFullStatusRequest{
+ ClusterId: r.URL.Query()["cluster"][0],
+ Alias: alias,
+ })
+
+ return NewJSONResponse(status, err)
+}
+
// GetTablets implements the http wrapper for /tablets[?cluster=[&cluster=]].
func GetTablets(ctx context.Context, r Request, api *API) *JSONResponse {
tablets, err := api.server.GetTablets(ctx, &vtadminpb.GetTabletsRequest{
diff --git a/go/vt/vtadmin/rbac/rbac.go b/go/vt/vtadmin/rbac/rbac.go
index 2aba889ea46..7b5b0e8c8e8 100644
--- a/go/vt/vtadmin/rbac/rbac.go
+++ b/go/vt/vtadmin/rbac/rbac.go
@@ -90,7 +90,8 @@ type Resource string
// Resource definitions.
const (
- ClusterResource Resource = "Cluster"
+ ClusterResource Resource = "Cluster"
+ TopologyResource Resource = "Topology"
/* generic topo resources */
@@ -115,4 +116,6 @@ const (
WorkflowResource Resource = "Workflow"
VTExplainResource Resource = "VTExplain"
+
+ TabletFullStatusResource Resource = "TabletFullStatus"
)
diff --git a/go/vt/vtadmin/testutil/authztestgen/config.json b/go/vt/vtadmin/testutil/authztestgen/config.json
index 88470fe7e86..ac89d7f5557 100644
--- a/go/vt/vtadmin/testutil/authztestgen/config.json
+++ b/go/vt/vtadmin/testutil/authztestgen/config.json
@@ -23,7 +23,7 @@
{
"field": "FindAllShardsInKeyspaceResults",
"type": "map[string]struct{\nResponse *vtctldatapb.FindAllShardsInKeyspaceResponse\nError error}",
- "value": "\"test\": {\nResponse: &vtctldatapb.FindAllShardsInKeyspaceResponse{\nShards: map[string]*vtctldatapb.Shard{\n\"-\": {\nKeyspace: \"test\",\nName: \"-\",\nShard: &topodatapb.Shard{\nKeyRange: &topodatapb.KeyRange{},\n},\n},\n},\n},\n},"
+ "value": "\"test\": {\nResponse: &vtctldatapb.FindAllShardsInKeyspaceResponse{\nShards: map[string]*vtctldatapb.Shard{\n\"-\": {\nKeyspace: \"test\",\nName: \"-\",\nShard: &topodatapb.Shard{\nKeyRange: &topodatapb.KeyRange{},\nIsPrimaryServing: true,\n},\n},\n},\n},\n},"
},
{
"field": "GetBackupsResults",
diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go
index 6164aa793e8..a113eb1a1c7 100644
--- a/go/vt/vtcombo/tablet_map.go
+++ b/go/vt/vtcombo/tablet_map.go
@@ -903,7 +903,7 @@ func (itmc *internalTabletManagerClient) Backup(context.Context, *topodatapb.Tab
return nil, fmt.Errorf("not implemented in vtcombo")
}
-func (itmc *internalTabletManagerClient) RestoreFromBackup(context.Context, *topodatapb.Tablet, time.Time) (logutil.EventStream, error) {
+func (itmc *internalTabletManagerClient) RestoreFromBackup(context.Context, *topodatapb.Tablet, time.Time, []string) (logutil.EventStream, error) {
return nil, fmt.Errorf("not implemented in vtcombo")
}
diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go
index 777a9ac44d7..2cb78f13d27 100644
--- a/go/vt/vtctl/grpcvtctldclient/client_gen.go
+++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go
@@ -47,15 +47,6 @@ func (client *gRPCVtctldClient) AddCellsAlias(ctx context.Context, in *vtctldata
return client.c.AddCellsAlias(ctx, in, opts...)
}
-// ApplyShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *gRPCVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
- if client.c == nil {
- return nil, status.Error(codes.Unavailable, connClosedMsg)
- }
-
- return client.c.ApplyShardRoutingRules(ctx, in, opts...)
-}
-
// ApplyRoutingRules is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtctldatapb.ApplyRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyRoutingRulesResponse, error) {
if client.c == nil {
@@ -74,6 +65,15 @@ func (client *gRPCVtctldClient) ApplySchema(ctx context.Context, in *vtctldatapb
return client.c.ApplySchema(ctx, in, opts...)
}
+// ApplyShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.ApplyShardRoutingRules(ctx, in, opts...)
+}
+
// ApplyVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) ApplyVSchema(ctx context.Context, in *vtctldatapb.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyVSchemaResponse, error) {
if client.c == nil {
@@ -308,15 +308,6 @@ func (client *gRPCVtctldClient) GetRoutingRules(ctx context.Context, in *vtctlda
return client.c.GetRoutingRules(ctx, in, opts...)
}
-// GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *gRPCVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
- if client.c == nil {
- return nil, status.Error(codes.Unavailable, connClosedMsg)
- }
-
- return client.c.GetShardRoutingRules(ctx, in, opts...)
-}
-
// GetSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) {
if client.c == nil {
@@ -335,6 +326,15 @@ func (client *gRPCVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.Ge
return client.c.GetShard(ctx, in, opts...)
}
+// GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.GetShardRoutingRules(ctx, in, opts...)
+}
+
// GetSrvKeyspaceNames is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) GetSrvKeyspaceNames(ctx context.Context, in *vtctldatapb.GetSrvKeyspaceNamesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSrvKeyspaceNamesResponse, error) {
if client.c == nil {
@@ -389,6 +389,15 @@ func (client *gRPCVtctldClient) GetTablets(ctx context.Context, in *vtctldatapb.
return client.c.GetTablets(ctx, in, opts...)
}
+// GetTopologyPath is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) GetTopologyPath(ctx context.Context, in *vtctldatapb.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldatapb.GetTopologyPathResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.GetTopologyPath(ctx, in, opts...)
+}
+
// GetVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *gRPCVtctldClient) GetVSchema(ctx context.Context, in *vtctldatapb.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetVSchemaResponse, error) {
if client.c == nil {
@@ -757,3 +766,12 @@ func (client *gRPCVtctldClient) ValidateVersionKeyspace(ctx context.Context, in
return client.c.ValidateVersionKeyspace(ctx, in, opts...)
}
+
+// ValidateVersionShard is part of the vtctlservicepb.VtctldClient interface.
+func (client *gRPCVtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldatapb.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionShardResponse, error) {
+ if client.c == nil {
+ return nil, status.Error(codes.Unavailable, connClosedMsg)
+ }
+
+ return client.c.ValidateVersionShard(ctx, in, opts...)
+}
diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go
index 19a580c451c..43ae4d95475 100644
--- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go
+++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go
@@ -63,25 +63,23 @@ func TestInitShardPrimary(t *testing.T) {
tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// These come from InitShardPrimary
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
tablet2.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet2.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort))
tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -131,7 +129,6 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) {
tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -140,7 +137,6 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) {
tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"FAKE RESET ALL REPLICATION",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go
index 7423efad50f..364a20fb0a3 100644
--- a/go/vt/vtctl/grpcvtctldserver/server.go
+++ b/go/vt/vtctl/grpcvtctldserver/server.go
@@ -382,6 +382,7 @@ func (s *VtctldServer) Backup(req *vtctldatapb.BackupRequest, stream vtctlservic
span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias))
span.Annotate("allow_primary", req.AllowPrimary)
span.Annotate("concurrency", req.Concurrency)
+ span.Annotate("backup_engine", req.BackupEngine)
ti, err := s.ts.GetTablet(ctx, req.TabletAlias)
if err != nil {
@@ -455,8 +456,13 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v
func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tablet, req *vtctldatapb.BackupRequest, stream interface {
Send(resp *vtctldatapb.BackupResponse) error
-}) error {
- r := &tabletmanagerdatapb.BackupRequest{Concurrency: int64(req.Concurrency), AllowPrimary: req.AllowPrimary}
+},
+) error {
+ r := &tabletmanagerdatapb.BackupRequest{
+ Concurrency: int64(req.Concurrency),
+ AllowPrimary: req.AllowPrimary,
+ BackupEngine: req.BackupEngine,
+ }
logStream, err := s.tmc.Backup(ctx, tablet, r)
if err != nil {
return err
@@ -995,6 +1001,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat
IgnoreReplicas: sets.NewString(ignoreReplicaAliases...),
WaitReplicasTimeout: waitReplicasTimeout,
PreventCrossCellPromotion: req.PreventCrossCellPromotion,
+ ExpectedPrimaryAlias: req.ExpectedPrimary,
},
)
@@ -1672,7 +1679,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
case len(req.TabletAliases) > 0:
span.Annotate("tablet_aliases", strings.Join(topoproto.TabletAliasList(req.TabletAliases).ToStringSlice(), ","))
- tabletMap, err = s.ts.GetTabletMap(ctx, req.TabletAliases)
+ tabletMap, err = s.ts.GetTabletMap(ctx, req.TabletAliases, nil)
if err != nil {
err = fmt.Errorf("GetTabletMap(%v) failed: %w", req.TabletAliases, err)
}
@@ -1746,7 +1753,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
go func(cell string) {
defer wg.Done()
- tablets, err := s.ts.GetTabletsByCell(ctx, cell)
+ tablets, err := s.ts.GetTabletsByCell(ctx, cell, nil)
if err != nil {
if req.Strict {
log.Infof("GetTablets got an error from cell %s: %s. Running in strict mode, so canceling other cell RPCs", cell, err)
@@ -1813,6 +1820,39 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable
}, nil
}
+// GetTopologyPath is part of the vtctlservicepb.VtctldServer interface.
+// It returns the cell located at the provided path in the topology server.
+func (s *VtctldServer) GetTopologyPath(ctx context.Context, req *vtctldatapb.GetTopologyPathRequest) (*vtctldatapb.GetTopologyPathResponse, error) {
+	span, ctx := trace.NewSpan(ctx, "VtctldServer.GetTopologyPath")
+ defer span.Finish()
+
+ // handle toplevel display: global, then one line per cell.
+ if req.Path == "/" {
+ cells, err := s.ts.GetKnownCells(ctx)
+ if err != nil {
+ return nil, err
+ }
+ resp := vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Path: req.Path,
+ // the toplevel display has no name, just children
+ Children: append([]string{topo.GlobalCell}, cells...),
+ },
+ }
+ return &resp, nil
+ }
+
+ // otherwise, delegate to getTopologyCell to parse the path and return the cell there
+ cell, err := s.getTopologyCell(ctx, req.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &vtctldatapb.GetTopologyPathResponse{
+ Cell: cell,
+ }, nil
+}
+
// GetVersion returns the version of a tablet from its debug vars
func (s *VtctldServer) GetVersion(ctx context.Context, req *vtctldatapb.GetVersionRequest) (resp *vtctldatapb.GetVersionResponse, err error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.GetVersion")
@@ -2189,6 +2229,10 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap
span.Annotate("avoid_primary_alias", topoproto.TabletAliasString(req.AvoidPrimary))
}
+ if req.ExpectedPrimary != nil {
+ span.Annotate("expected_primary_alias", topoproto.TabletAliasString(req.ExpectedPrimary))
+ }
+
if req.NewPrimary != nil {
span.Annotate("new_primary_alias", topoproto.TabletAliasString(req.NewPrimary))
}
@@ -2206,9 +2250,10 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap
req.Keyspace,
req.Shard,
reparentutil.PlannedReparentOptions{
- AvoidPrimaryAlias: req.AvoidPrimary,
- NewPrimaryAlias: req.NewPrimary,
- WaitReplicasTimeout: waitReplicasTimeout,
+ AvoidPrimaryAlias: req.AvoidPrimary,
+ NewPrimaryAlias: req.NewPrimary,
+ ExpectedPrimaryAlias: req.ExpectedPrimary,
+ WaitReplicasTimeout: waitReplicasTimeout,
},
)
@@ -2624,7 +2669,7 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque
span.Annotate("keyspace", ti.Keyspace)
span.Annotate("shard", ti.Shard)
- logStream, err := s.tmc.RestoreFromBackup(ctx, ti.Tablet, protoutil.TimeFromProto(req.BackupTime))
+ logStream, err := s.tmc.RestoreFromBackup(ctx, ti.Tablet, protoutil.TimeFromProto(req.BackupTime), req.AllowedBackupEngines)
if err != nil {
return err
}
@@ -3841,7 +3886,7 @@ func (s *VtctldServer) ValidateShard(ctx context.Context, req *vtctldatapb.Valid
getTabletMapCtx, getTabletMapCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
defer getTabletMapCancel()
- tabletMap, _ := s.ts.GetTabletMap(getTabletMapCtx, aliases)
+ tabletMap, _ := s.ts.GetTabletMap(getTabletMapCtx, aliases, nil)
var primaryAlias *topodatapb.TabletAlias
for _, alias := range aliases {
@@ -4093,6 +4138,63 @@ func (s *VtctldServer) ValidateVersionKeyspace(ctx context.Context, req *vtctlda
return resp, err
}
+// ValidateVersionShard validates all versions are the same in all
+// tablets in a shard
+func (s *VtctldServer) ValidateVersionShard(ctx context.Context, req *vtctldatapb.ValidateVersionShardRequest) (resp *vtctldatapb.ValidateVersionShardResponse, err error) {
+ span, ctx := trace.NewSpan(ctx, "VtctldServer.ValidateVersionShard")
+ defer span.Finish()
+
+ defer panicHandler(&err)
+
+ shard, err := s.ts.GetShard(ctx, req.Keyspace, req.Shard)
+ if err != nil {
+ err = fmt.Errorf("GetShard(%s) failed: %v", req.Shard, err)
+ return nil, err
+ }
+
+ if !shard.HasPrimary() {
+ err = fmt.Errorf("no primary in shard %v/%v", req.Keyspace, req.Shard)
+ return nil, err
+ }
+
+ log.Infof("Gathering version for primary %v", topoproto.TabletAliasString(shard.PrimaryAlias))
+ primaryVersion, err := s.GetVersion(ctx, &vtctldatapb.GetVersionRequest{
+ TabletAlias: shard.PrimaryAlias,
+ })
+ if err != nil {
+ err = fmt.Errorf("GetVersion(%s) failed: %v", topoproto.TabletAliasString(shard.PrimaryAlias), err)
+ return nil, err
+ }
+
+ aliases, err := s.ts.FindAllTabletAliasesInShard(ctx, req.Keyspace, req.Shard)
+ if err != nil {
+ err = fmt.Errorf("FindAllTabletAliasesInShard(%s, %s) failed: %v", req.Keyspace, req.Shard, err)
+ return nil, err
+ }
+
+ er := concurrency.AllErrorRecorder{}
+ wg := sync.WaitGroup{}
+ for _, alias := range aliases {
+ if topoproto.TabletAliasEqual(alias, shard.PrimaryAlias) {
+ continue
+ }
+
+ wg.Add(1)
+ go func(alias *topodatapb.TabletAlias) {
+ s.diffVersion(ctx, primaryVersion.Version, shard.PrimaryAlias, alias, &wg, &er)
+ }(alias)
+ }
+
+ wg.Wait()
+
+ response := vtctldatapb.ValidateVersionShardResponse{}
+ if er.HasErrors() {
+ response.Results = append(response.Results, er.ErrorStrings()...)
+ }
+
+ return &response, nil
+}
+
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences
func (s *VtctldServer) ValidateVSchema(ctx context.Context, req *vtctldatapb.ValidateVSchemaRequest) (resp *vtctldatapb.ValidateVSchemaResponse, err error) {
span, ctx := trace.NewSpan(ctx, "VtctldServer.ValidateVSchema")
@@ -4183,6 +4285,54 @@ func StartServer(s *grpc.Server, ts *topo.Server) {
vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts))
}
+// getTopologyCell is a helper method that returns a topology cell given its path.
+func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*vtctldatapb.TopologyCell, error) {
+ // extract cell and relative path
+ parts := strings.Split(cellPath, "/")
+ if parts[0] != "" || len(parts) < 2 {
+ err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid path: %s", cellPath)
+ return nil, err
+ }
+ cell := parts[1]
+ relativePath := cellPath[len(cell)+1:]
+ topoCell := vtctldatapb.TopologyCell{Name: parts[len(parts)-1], Path: cellPath}
+
+ conn, err := s.ts.ConnForCell(ctx, cell)
+ if err != nil {
+ err := vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "error fetching connection to cell %s: %v", cell, err)
+ return nil, err
+ }
+
+ data, _, dataErr := conn.Get(ctx, relativePath)
+
+ if dataErr == nil {
+ result, err := topo.DecodeContent(relativePath, data, false)
+ if err != nil {
+ err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err)
+ return nil, err
+ }
+ topoCell.Data = result
+ // since there is data at this cell, it cannot be a directory cell
+ // so we can early return the topocell
+ return &topoCell, nil
+ }
+
+ children, childrenErr := conn.ListDir(ctx, relativePath, false /*full*/)
+
+ if childrenErr != nil && dataErr != nil {
+		err := vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, dataErr)
+ return nil, err
+ }
+
+ topoCell.Children = make([]string, len(children))
+
+ for i, c := range children {
+ topoCell.Children[i] = c.Name
+ }
+
+ return &topoCell, nil
+}
+
// Helper function to get version of a tablet from its debug vars
var getVersionFromTabletDebugVars = func(tabletAddr string) (string, error) {
resp, err := http.Get("http://" + tabletAddr + "/debug/vars")
@@ -4211,3 +4361,20 @@ var getVersionFromTabletDebugVars = func(tabletAddr string) (string, error) {
}
var getVersionFromTablet = getVersionFromTabletDebugVars
+
+// helper method to asynchronously get and diff a version
+func (s *VtctldServer) diffVersion(ctx context.Context, primaryVersion string, primaryAlias *topodatapb.TabletAlias, alias *topodatapb.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) {
+ defer wg.Done()
+ log.Infof("Gathering version for %v", topoproto.TabletAliasString(alias))
+ replicaVersion, err := s.GetVersion(ctx, &vtctldatapb.GetVersionRequest{
+ TabletAlias: alias,
+ })
+ if err != nil {
+ er.RecordError(fmt.Errorf("unable to get version for tablet %v: %v", alias, err))
+ return
+ }
+
+ if primaryVersion != replicaVersion.Version {
+ er.RecordError(fmt.Errorf("primary %v version %v is different than replica %v version %v", topoproto.TabletAliasString(primaryAlias), primaryVersion, topoproto.TabletAliasString(alias), replicaVersion))
+ }
+}
diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go
index d9b10b77b91..2f8a15b14a9 100644
--- a/go/vt/vtctl/grpcvtctldserver/server_test.go
+++ b/go/vt/vtctl/grpcvtctldserver/server_test.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"sort"
+ "sync"
"testing"
"time"
@@ -2704,7 +2705,7 @@ func TestDeleteShards(t *testing.T) {
defer func() {
topofactory.SetError(nil)
- actualShards := []*vtctldatapb.Shard{}
+ var actualShards []*vtctldatapb.Shard
keyspaces, err := ts.GetKeyspaces(ctx)
require.NoError(t, err, "cannot get keyspace names to check remaining shards")
@@ -5957,6 +5958,94 @@ func TestGetTablets(t *testing.T) {
}
}
+func TestGetTopologyPath(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ ts := memorytopo.NewServer("cell1", "cell2", "cell3")
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer {
+ return NewVtctldServer(ts)
+ })
+
+ err := ts.CreateKeyspace(ctx, "keyspace1", &topodatapb.Keyspace{})
+ require.NoError(t, err)
+
+ testutil.AddTablets(ctx, t, ts, nil, &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{Cell: "cell1", Uid: 100},
+ Hostname: "localhost",
+ Keyspace: "keyspace1",
+ MysqlHostname: "localhost",
+ MysqlPort: 17100,
+ })
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ path string
+ shouldErr bool
+ expected *vtctldatapb.GetTopologyPathResponse
+ }{
+ {
+ name: "root path",
+ path: "/",
+ expected: &vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Path: "/",
+ Children: []string{"global", "cell1", "cell2", "cell3"},
+ },
+ },
+ },
+ {
+ name: "invalid path",
+ path: "",
+ shouldErr: true,
+ },
+ {
+ name: "global path",
+ path: "/global",
+ expected: &vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Name: "global",
+ Path: "/global",
+ Children: []string{"cells", "keyspaces"},
+ },
+ },
+ },
+ {
+ name: "terminal data path",
+ path: "/cell1/tablets/cell1-0000000100/Tablet",
+ expected: &vtctldatapb.GetTopologyPathResponse{
+ Cell: &vtctldatapb.TopologyCell{
+ Name: "Tablet",
+ Path: "/cell1/tablets/cell1-0000000100/Tablet",
+ Data: "alias:{cell:\"cell1\" uid:100} hostname:\"localhost\" keyspace:\"keyspace1\" mysql_hostname:\"localhost\" mysql_port:17100",
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ resp, err := vtctld.GetTopologyPath(ctx, &vtctldatapb.GetTopologyPathRequest{
+ Path: tt.path,
+ })
+
+ if tt.shouldErr {
+ assert.Error(t, err)
+ return
+ }
+
+ assert.NoError(t, err)
+ utils.MustMatch(t, tt.expected, resp)
+ })
+ }
+}
+
func TestGetVSchema(t *testing.T) {
t.Parallel()
@@ -11180,6 +11269,122 @@ func TestValidateVersionKeyspace(t *testing.T) {
}
}
+func TestValidateVersionShard(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ ts := memorytopo.NewServer("zone1", "zone2")
+ tmc := testutil.TabletManagerClient{
+ GetSchemaResults: map[string]struct {
+ Schema *tabletmanagerdatapb.SchemaDefinition
+ Error error
+ }{},
+ }
+ testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{
+ Name: "ks",
+ Keyspace: &topodatapb.Keyspace{
+ KeyspaceType: topodatapb.KeyspaceType_NORMAL,
+ },
+ })
+
+ tablets := []*topodatapb.Tablet{
+ {
+ Keyspace: "ks",
+ Shard: "-",
+ Type: topodatapb.TabletType_PRIMARY,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Hostname: "primary",
+ },
+ {
+ Keyspace: "ks",
+ Shard: "-",
+ Type: topodatapb.TabletType_REPLICA,
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Hostname: "replica",
+ },
+ }
+ testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{
+ AlsoSetShardPrimary: true,
+ ForceSetShardPrimary: true,
+ SkipShardCreation: false,
+ }, tablets...)
+
+ vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer {
+ return NewVtctldServer(ts)
+ })
+
+ tests := []*struct {
+ name string
+ req *vtctldatapb.ValidateVersionShardRequest
+ expected *vtctldatapb.ValidateVersionShardResponse
+ setup func(*sync.Mutex)
+ shouldErr bool
+ }{
+ {
+ name: "valid versions",
+ req: &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: "ks",
+ Shard: "-",
+ },
+ expected: &vtctldatapb.ValidateVersionShardResponse{
+ Results: []string{},
+ },
+ setup: func(testSetupMu *sync.Mutex) {
+ testSetupMu.Lock()
+ defer testSetupMu.Unlock()
+ addrVersionMap := map[string]string{
+ "primary:0": "version1",
+ "replica:0": "version1",
+ }
+ getVersionFromTablet = testutil.MockGetVersionFromTablet(addrVersionMap)
+ },
+ shouldErr: false,
+ },
+ {
+ name: "different versions",
+ req: &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: "ks",
+ Shard: "-",
+ },
+ expected: &vtctldatapb.ValidateVersionShardResponse{
+ Results: []string{"primary zone1-0000000100 version version1 is different than replica zone1-0000000101 version version:\"version2\""},
+ },
+ setup: func(testSetupMu *sync.Mutex) {
+ testSetupMu.Lock()
+ defer testSetupMu.Unlock()
+ addrVersionMap := map[string]string{
+ "primary:0": "version1",
+ "replica:0": "version2",
+ }
+ getVersionFromTablet = testutil.MockGetVersionFromTablet(addrVersionMap)
+ },
+ shouldErr: false,
+ },
+ }
+
+ var testSetupMu sync.Mutex
+ for _, tt := range tests {
+ curT := tt
+ t.Run(tt.name, func(t *testing.T) {
+ curT.setup(&testSetupMu)
+ resp, err := vtctld.ValidateVersionShard(ctx, curT.req)
+ if curT.shouldErr {
+ assert.Error(t, err)
+ return
+ }
+
+ assert.NoError(t, err)
+ utils.MustMatch(t, curT.expected, resp)
+ })
+ }
+}
+
func TestValidateShard(t *testing.T) {
t.Parallel()
diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
index 5ed8a30190c..776bace5f24 100644
--- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
+++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go
@@ -184,6 +184,8 @@ type TabletManagerClient struct {
EventJitter time.Duration
ErrorAfter time.Duration
}
+ // Backing Up - keyed by tablet alias.
+ TabletsBackupState map[string]bool
// keyed by tablet alias.
ChangeTabletTypeResult map[string]error
// keyed by tablet alias.
@@ -864,6 +866,9 @@ func (fake *TabletManagerClient) ReplicationStatus(ctx context.Context, tablet *
}
if result, ok := fake.ReplicationStatusResults[key]; ok {
+ if _, ok = fake.TabletsBackupState[key]; ok {
+ result.Position.BackupRunning = fake.TabletsBackupState[key]
+ }
return result.Position, result.Error
}
@@ -900,7 +905,7 @@ func (stream *backupRestoreStreamAdapter) Send(msg *logutilpb.Event) error {
}
// RestoreFromBackup is part of the tmclient.TabletManagerClient interface.
-func (fake *TabletManagerClient) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time) (logutil.EventStream, error) {
+func (fake *TabletManagerClient) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time, allowedBackupEngines []string) (logutil.EventStream, error) {
key := topoproto.TabletAliasString(tablet.Alias)
testdata, ok := fake.RestoreFromBackupResults[key]
if !ok {
diff --git a/go/vt/vtctl/grpcvtctldserver/topo.go b/go/vt/vtctl/grpcvtctldserver/topo.go
index 70fae6613aa..5ec369ca17f 100644
--- a/go/vt/vtctl/grpcvtctldserver/topo.go
+++ b/go/vt/vtctl/grpcvtctldserver/topo.go
@@ -161,7 +161,7 @@ func deleteShardCell(ctx context.Context, ts *topo.Server, keyspace string, shar
// Get all the tablet records for the aliases we've collected. Note that
// GetTabletMap ignores ErrNoNode, which is convenient for our purpose; it
// means a tablet was deleted but is still referenced.
- tabletMap, err := ts.GetTabletMap(ctx, aliases)
+ tabletMap, err := ts.GetTabletMap(ctx, aliases, nil)
if err != nil {
return fmt.Errorf("GetTabletMap() failed: %w", err)
}
diff --git a/go/vt/vtctl/localvtctldclient/client_gen.go b/go/vt/vtctl/localvtctldclient/client_gen.go
index 45b8629f858..fae824d7edf 100644
--- a/go/vt/vtctl/localvtctldclient/client_gen.go
+++ b/go/vt/vtctl/localvtctldclient/client_gen.go
@@ -44,16 +44,16 @@ func (client *localVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtct
return client.s.ApplyRoutingRules(ctx, in)
}
-// ApplyRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *localVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
- return client.s.ApplyShardRoutingRules(ctx, in)
-}
-
// ApplySchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) ApplySchema(ctx context.Context, in *vtctldatapb.ApplySchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplySchemaResponse, error) {
return client.s.ApplySchema(ctx, in)
}
+// ApplyShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldatapb.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyShardRoutingRulesResponse, error) {
+ return client.s.ApplyShardRoutingRules(ctx, in)
+}
+
// ApplyVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) ApplyVSchema(ctx context.Context, in *vtctldatapb.ApplyVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyVSchemaResponse, error) {
return client.s.ApplyVSchema(ctx, in)
@@ -276,11 +276,6 @@ func (client *localVtctldClient) GetRoutingRules(ctx context.Context, in *vtctld
return client.s.GetRoutingRules(ctx, in)
}
-// GetRoutingRules is part of the vtctlservicepb.VtctldClient interface.
-func (client *localVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
- return client.s.GetShardRoutingRules(ctx, in)
-}
-
// GetSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) {
return client.s.GetSchema(ctx, in)
@@ -291,6 +286,11 @@ func (client *localVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.G
return client.s.GetShard(ctx, in)
}
+// GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) {
+ return client.s.GetShardRoutingRules(ctx, in)
+}
+
// GetSrvKeyspaceNames is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) GetSrvKeyspaceNames(ctx context.Context, in *vtctldatapb.GetSrvKeyspaceNamesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSrvKeyspaceNamesResponse, error) {
return client.s.GetSrvKeyspaceNames(ctx, in)
@@ -321,6 +321,11 @@ func (client *localVtctldClient) GetTablets(ctx context.Context, in *vtctldatapb
return client.s.GetTablets(ctx, in)
}
+// GetTopologyPath is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) GetTopologyPath(ctx context.Context, in *vtctldatapb.GetTopologyPathRequest, opts ...grpc.CallOption) (*vtctldatapb.GetTopologyPathResponse, error) {
+ return client.s.GetTopologyPath(ctx, in)
+}
+
// GetVSchema is part of the vtctlservicepb.VtctldClient interface.
func (client *localVtctldClient) GetVSchema(ctx context.Context, in *vtctldatapb.GetVSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetVSchemaResponse, error) {
return client.s.GetVSchema(ctx, in)
@@ -571,3 +576,8 @@ func (client *localVtctldClient) ValidateVSchema(ctx context.Context, in *vtctld
func (client *localVtctldClient) ValidateVersionKeyspace(ctx context.Context, in *vtctldatapb.ValidateVersionKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionKeyspaceResponse, error) {
return client.s.ValidateVersionKeyspace(ctx, in)
}
+
+// ValidateVersionShard is part of the vtctlservicepb.VtctldClient interface.
+func (client *localVtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldatapb.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionShardResponse, error) {
+ return client.s.ValidateVersionShard(ctx, in)
+}
diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go
index b2f030e0756..43844eb9388 100644
--- a/go/vt/vtctl/reparent.go
+++ b/go/vt/vtctl/reparent.go
@@ -38,10 +38,12 @@ func init() {
help: "Reparent a tablet to the current primary in the shard. This only works if the current replication position matches the last known reparent action.",
})
addCommand("Shards", command{
- name: "InitShardPrimary",
- method: commandInitShardPrimary,
- params: "[--force] [--wait_replicas_timeout=] ",
- help: "Sets the initial primary for a shard. Will make all other tablets in the shard replicas of the provided tablet. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead.",
+ name: "InitShardPrimary",
+ method: commandInitShardPrimary,
+ params: "[--force] [--wait_replicas_timeout=] ",
+ help: "Sets the initial primary for a shard. Will make all other tablets in the shard replicas of the provided tablet. WARNING: this could cause data loss on an already replicating shard. PlannedReparentShard or EmergencyReparentShard should be used instead.",
+ deprecated: true,
+ deprecatedBy: "PlannedReparentShard",
})
addCommand("Shards", command{
name: "PlannedReparentShard",
diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/durability.go
index 735965c3afa..8fd1634faa0 100644
--- a/go/vt/vtctl/reparentutil/durability.go
+++ b/go/vt/vtctl/reparentutil/durability.go
@@ -55,13 +55,13 @@ func init() {
// Durabler is the interface which is used to get the promotion rules for candidates and the semi sync setup
type Durabler interface {
- // promotionRule represents the precedence in which we want to tablets to be promoted.
+ // PromotionRule represents the precedence in which we want to tablets to be promoted.
// The higher the promotion rule of a tablet, the more we want it to be promoted in case of a failover
- promotionRule(*topodatapb.Tablet) promotionrule.CandidatePromotionRule
- // semiSyncAckers represents the number of semi-sync ackers required for a given tablet if it were to become the PRIMARY instance
- semiSyncAckers(*topodatapb.Tablet) int
- // isReplicaSemiSync returns whether the "replica" should send semi-sync acks if "primary" were to become the PRIMARY instance
- isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool
+ PromotionRule(*topodatapb.Tablet) promotionrule.CandidatePromotionRule
+ // SemiSyncAckers represents the number of semi-sync ackers required for a given tablet if it were to become the PRIMARY instance
+ SemiSyncAckers(*topodatapb.Tablet) int
+ // IsReplicaSemiSync returns whether the "replica" should send semi-sync acks if "primary" were to become the PRIMARY instance
+ IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool
}
func RegisterDurability(name string, newDurablerFunc NewDurabler) {
@@ -94,13 +94,13 @@ func PromotionRule(durability Durabler, tablet *topodatapb.Tablet) promotionrule
if tablet == nil || tablet.Alias == nil {
return promotionrule.MustNot
}
- return durability.promotionRule(tablet)
+ return durability.PromotionRule(tablet)
}
// SemiSyncAckers returns the primary semi-sync setting for the instance.
// 0 means none. Non-zero specifies the number of required ackers.
func SemiSyncAckers(durability Durabler, tablet *topodatapb.Tablet) int {
- return durability.semiSyncAckers(tablet)
+ return durability.SemiSyncAckers(tablet)
}
// IsReplicaSemiSync returns the replica semi-sync setting from the tablet record.
@@ -110,7 +110,7 @@ func IsReplicaSemiSync(durability Durabler, primary, replica *topodatapb.Tablet)
if primary == nil || primary.Alias == nil || replica == nil || replica.Alias == nil {
return false
}
- return durability.isReplicaSemiSync(primary, replica)
+ return durability.IsReplicaSemiSync(primary, replica)
}
//=======================================================================
@@ -118,8 +118,8 @@ func IsReplicaSemiSync(durability Durabler, primary, replica *topodatapb.Tablet)
// durabilityNone has no semi-sync and returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else
type durabilityNone struct{}
-// promotionRule implements the Durabler interface
-func (d *durabilityNone) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
+// PromotionRule implements the Durabler interface
+func (d *durabilityNone) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
switch tablet.Type {
case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA:
return promotionrule.Neutral
@@ -127,13 +127,13 @@ func (d *durabilityNone) promotionRule(tablet *topodatapb.Tablet) promotionrule.
return promotionrule.MustNot
}
-// semiSyncAckers implements the Durabler interface
-func (d *durabilityNone) semiSyncAckers(tablet *topodatapb.Tablet) int {
+// SemiSyncAckers implements the Durabler interface
+func (d *durabilityNone) SemiSyncAckers(tablet *topodatapb.Tablet) int {
return 0
}
-// isReplicaSemiSync implements the Durabler interface
-func (d *durabilityNone) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
+// IsReplicaSemiSync implements the Durabler interface
+func (d *durabilityNone) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
return false
}
@@ -143,8 +143,8 @@ func (d *durabilityNone) isReplicaSemiSync(primary, replica *topodatapb.Tablet)
// It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else
type durabilitySemiSync struct{}
-// promotionRule implements the Durabler interface
-func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
+// PromotionRule implements the Durabler interface
+func (d *durabilitySemiSync) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
switch tablet.Type {
case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA:
return promotionrule.Neutral
@@ -152,13 +152,13 @@ func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionr
return promotionrule.MustNot
}
-// semiSyncAckers implements the Durabler interface
-func (d *durabilitySemiSync) semiSyncAckers(tablet *topodatapb.Tablet) int {
+// SemiSyncAckers implements the Durabler interface
+func (d *durabilitySemiSync) SemiSyncAckers(tablet *topodatapb.Tablet) int {
return 1
}
-// isReplicaSemiSync implements the Durabler interface
-func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
+// IsReplicaSemiSync implements the Durabler interface
+func (d *durabilitySemiSync) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
switch replica.Type {
case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA:
return true
@@ -173,8 +173,8 @@ func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tabl
// It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else
type durabilityCrossCell struct{}
-// promotionRule implements the Durabler interface
-func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
+// PromotionRule implements the Durabler interface
+func (d *durabilityCrossCell) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
switch tablet.Type {
case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA:
return promotionrule.Neutral
@@ -182,13 +182,13 @@ func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotion
return promotionrule.MustNot
}
-// semiSyncAckers implements the Durabler interface
-func (d *durabilityCrossCell) semiSyncAckers(tablet *topodatapb.Tablet) int {
+// SemiSyncAckers implements the Durabler interface
+func (d *durabilityCrossCell) SemiSyncAckers(tablet *topodatapb.Tablet) int {
return 1
}
-// isReplicaSemiSync implements the Durabler interface
-func (d *durabilityCrossCell) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
+// IsReplicaSemiSync implements the Durabler interface
+func (d *durabilityCrossCell) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
switch replica.Type {
case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA:
return primary.Alias.Cell != replica.Alias.Cell
@@ -201,8 +201,8 @@ func (d *durabilityCrossCell) isReplicaSemiSync(primary, replica *topodatapb.Tab
// durabilityTest is like durabilityNone. It overrides the type for a specific tablet to prefer. It is only meant to be used for testing purposes!
type durabilityTest struct{}
-// promotionRule implements the Durabler interface
-func (d *durabilityTest) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
+// PromotionRule implements the Durabler interface
+func (d *durabilityTest) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule {
if topoproto.TabletAliasString(tablet.Alias) == "zone2-0000000200" {
return promotionrule.Prefer
}
@@ -214,12 +214,12 @@ func (d *durabilityTest) promotionRule(tablet *topodatapb.Tablet) promotionrule.
return promotionrule.MustNot
}
-// semiSyncAckers implements the Durabler interface
-func (d *durabilityTest) semiSyncAckers(tablet *topodatapb.Tablet) int {
+// SemiSyncAckers implements the Durabler interface
+func (d *durabilityTest) SemiSyncAckers(tablet *topodatapb.Tablet) int {
return 0
}
-// isReplicaSemiSync implements the Durabler interface
-func (d *durabilityTest) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
+// IsReplicaSemiSync implements the Durabler interface
+func (d *durabilityTest) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool {
return false
}
diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/durability_test.go
index 857718174c5..c519817c312 100644
--- a/go/vt/vtctl/reparentutil/durability_test.go
+++ b/go/vt/vtctl/reparentutil/durability_test.go
@@ -268,7 +268,7 @@ func TestDurabilityTest(t *testing.T) {
for _, testcase := range testcases {
t.Run(topoproto.TabletAliasString(testcase.tablet.Alias), func(t *testing.T) {
- rule := durabilityRules.promotionRule(testcase.tablet)
+ rule := durabilityRules.PromotionRule(testcase.tablet)
assert.Equal(t, testcase.promotionRule, rule)
})
}
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go
index ba846ebc147..95bbd254a58 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go
@@ -59,6 +59,7 @@ type EmergencyReparentOptions struct {
IgnoreReplicas sets.String
WaitReplicasTimeout time.Duration
PreventCrossCellPromotion bool
+ ExpectedPrimaryAlias *topodatapb.TabletAlias
// Private options managed internally. We use value passing to avoid leaking
// these details back out.
@@ -162,6 +163,13 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve
}
ev.ShardInfo = *shardInfo
+ if opts.ExpectedPrimaryAlias != nil && !topoproto.TabletAliasEqual(opts.ExpectedPrimaryAlias, shardInfo.PrimaryAlias) {
+ return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %s is not equal to expected alias %s",
+ topoproto.TabletAliasString(shardInfo.PrimaryAlias),
+ topoproto.TabletAliasString(opts.ExpectedPrimaryAlias),
+ )
+ }
+
keyspaceDurability, err := erp.ts.GetKeyspaceDurability(ctx, keyspace)
if err != nil {
return err
@@ -192,7 +200,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve
}
// Stop replication on all the tablets and build their status map
- stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, opts.WaitReplicasTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, erp.logger)
+ stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, topo.RemoteOperationTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, erp.logger)
if err != nil {
return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err)
}
@@ -238,7 +246,8 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve
// 2. Remove the tablets with the Must_not promote rule
// 3. Remove cross-cell tablets if PreventCrossCellPromotion is specified
// Our final primary candidate MUST belong to this list of valid candidates
- validCandidateTablets, err = erp.filterValidCandidates(validCandidateTablets, stoppedReplicationSnapshot.reachableTablets, prevPrimary, opts)
+ validCandidateTablets, err = erp.filterValidCandidates(validCandidateTablets,
+ stoppedReplicationSnapshot.reachableTablets, stoppedReplicationSnapshot.tabletsBackupState, prevPrimary, opts)
if err != nil {
return err
}
@@ -714,9 +723,12 @@ func (erp *EmergencyReparenter) promoteNewPrimary(
return nil
}
-// filterValidCandidates filters valid tablets, keeping only the ones which can successfully be promoted without any constraint failures and can make forward progress on being promoted
-func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb.Tablet, tabletsReachable []*topodatapb.Tablet, prevPrimary *topodatapb.Tablet, opts EmergencyReparentOptions) ([]*topodatapb.Tablet, error) {
+// filterValidCandidates filters valid tablets, keeping only the ones which can successfully be promoted without any
+// constraint failures and can make forward progress on being promoted. It will filter out candidates taking backups
+// if possible.
+func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb.Tablet, tabletsReachable []*topodatapb.Tablet, tabletsBackupState map[string]bool, prevPrimary *topodatapb.Tablet, opts EmergencyReparentOptions) ([]*topodatapb.Tablet, error) {
var restrictedValidTablets []*topodatapb.Tablet
+ var notPreferredValidTablets []*topodatapb.Tablet
for _, tablet := range validTablets {
tabletAliasStr := topoproto.TabletAliasString(tablet.Alias)
// Remove tablets which have MustNot promote rule since they must never be promoted
@@ -743,7 +755,18 @@ func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb
}
continue
}
- restrictedValidTablets = append(restrictedValidTablets, tablet)
+ // Put candidates that are running a backup in a separate list
+ backingUp, ok := tabletsBackupState[tabletAliasStr]
+ if ok && backingUp {
+ erp.logger.Infof("Setting %s in list of valid candidates taking a backup", tabletAliasStr)
+ notPreferredValidTablets = append(notPreferredValidTablets, tablet)
+ } else {
+ restrictedValidTablets = append(restrictedValidTablets, tablet)
+ }
}
- return restrictedValidTablets, nil
+ if len(restrictedValidTablets) > 0 {
+ return restrictedValidTablets, nil
+ }
+
+ return notPreferredValidTablets, nil
}
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
index 19d119a6ba2..32956ca9546 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
@@ -1895,6 +1895,55 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) {
shouldErr: true,
errShouldContain: "proposed primary zone1-0000000102 will not be able to make forward progress on being promoted",
},
+ {
+ name: "expected primary mismatch",
+ durability: "none",
+ emergencyReparentOps: EmergencyReparentOptions{
+ ExpectedPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ },
+ tmc: &testutil.TabletManagerClient{},
+ shards: []*vtctldatapb.Shard{
+ {
+ Keyspace: "testkeyspace",
+ Name: "-",
+ Shard: &topodatapb.Shard{
+ IsPrimaryServing: true,
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ },
+ },
+ },
+ tablets: []*topodatapb.Tablet{
+ {
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ Keyspace: "testkeyspace",
+ Shard: "-",
+ },
+ {
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ Keyspace: "testkeyspace",
+ Shard: "-",
+ },
+ },
+ keyspace: "testkeyspace",
+ shard: "-",
+ ts: memorytopo.NewServer("zone1"),
+ shouldErr: true,
+ errShouldContain: "primary zone1-0000000100 is not equal to expected alias zone1-0000000101",
+ },
}
for _, tt := range tests {
@@ -1934,7 +1983,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) {
err := erp.reparentShardLocked(ctx, ev, tt.keyspace, tt.shard, tt.emergencyReparentOps)
if tt.shouldErr {
assert.Error(t, err)
- assert.Contains(t, err.Error(), tt.errShouldContain)
+ assert.ErrorContains(t, err, tt.errShouldContain)
return
}
@@ -4316,27 +4365,54 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
}
)
allTablets := []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}
+ noTabletsTakingBackup := map[string]bool{
+ topoproto.TabletAliasString(primaryTablet.Alias): false, topoproto.TabletAliasString(replicaTablet.Alias): false,
+ topoproto.TabletAliasString(rdonlyTablet.Alias): false, topoproto.TabletAliasString(replicaCrossCellTablet.Alias): false,
+ topoproto.TabletAliasString(rdonlyCrossCellTablet.Alias): false,
+ }
+ replicaTakingBackup := map[string]bool{
+ topoproto.TabletAliasString(primaryTablet.Alias): false, topoproto.TabletAliasString(replicaTablet.Alias): true,
+ topoproto.TabletAliasString(rdonlyTablet.Alias): false, topoproto.TabletAliasString(replicaCrossCellTablet.Alias): false,
+ topoproto.TabletAliasString(rdonlyCrossCellTablet.Alias): false,
+ }
tests := []struct {
- name string
- durability string
- validTablets []*topodatapb.Tablet
- tabletsReachable []*topodatapb.Tablet
- prevPrimary *topodatapb.Tablet
- opts EmergencyReparentOptions
- filteredTablets []*topodatapb.Tablet
- errShouldContain string
+ name string
+ durability string
+ validTablets []*topodatapb.Tablet
+ tabletsReachable []*topodatapb.Tablet
+ tabletsTakingBackup map[string]bool
+ prevPrimary *topodatapb.Tablet
+ opts EmergencyReparentOptions
+ filteredTablets []*topodatapb.Tablet
+ errShouldContain string
}{
{
- name: "filter must not",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
- filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet},
+ name: "filter must not",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet},
+ }, {
+ name: "host taking backup must not be on the list when there are other candidates",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ tabletsTakingBackup: replicaTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet},
+ }, {
+ name: "host taking backup must be the only one on the list when there are no other candidates",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: []*topodatapb.Tablet{replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ tabletsTakingBackup: replicaTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{replicaTablet},
}, {
- name: "filter cross cell",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
+ name: "filter cross cell",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
prevPrimary: &topodatapb.Tablet{
Alias: &topodatapb.TabletAlias{
Cell: "zone-1",
@@ -4347,11 +4423,12 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
},
filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet},
}, {
- name: "filter establish",
- durability: "cross_cell",
- validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet},
- tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
- filteredTablets: nil,
+ name: "filter establish",
+ durability: "cross_cell",
+ validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet},
+ tabletsTakingBackup: noTabletsTakingBackup,
+ tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ filteredTablets: nil,
}, {
name: "filter mixed",
durability: "cross_cell",
@@ -4363,34 +4440,38 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
opts: EmergencyReparentOptions{
PreventCrossCellPromotion: true,
},
- validTablets: allTablets,
- tabletsReachable: allTablets,
- filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet},
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
+ filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet},
}, {
- name: "error - requested primary must not",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
+ name: "error - requested primary must not",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ tabletsTakingBackup: noTabletsTakingBackup,
opts: EmergencyReparentOptions{
NewPrimaryAlias: rdonlyTablet.Alias,
},
errShouldContain: "proposed primary zone-1-0000000003 has a must not promotion rule",
}, {
- name: "error - requested primary not in same cell",
- durability: "none",
- validTablets: allTablets,
- tabletsReachable: allTablets,
- prevPrimary: primaryTablet,
+ name: "error - requested primary not in same cell",
+ durability: "none",
+ validTablets: allTablets,
+ tabletsReachable: allTablets,
+ prevPrimary: primaryTablet,
+ tabletsTakingBackup: noTabletsTakingBackup,
opts: EmergencyReparentOptions{
PreventCrossCellPromotion: true,
NewPrimaryAlias: replicaCrossCellTablet.Alias,
},
errShouldContain: "proposed primary zone-2-0000000002 is is a different cell as the previous primary",
}, {
- name: "error - requested primary cannot establish",
- durability: "cross_cell",
- validTablets: allTablets,
- tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ name: "error - requested primary cannot establish",
+ durability: "cross_cell",
+ validTablets: allTablets,
+ tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet},
+ tabletsTakingBackup: noTabletsTakingBackup,
opts: EmergencyReparentOptions{
NewPrimaryAlias: primaryTablet.Alias,
},
@@ -4404,7 +4485,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) {
tt.opts.durability = durability
logger := logutil.NewMemoryLogger()
erp := NewEmergencyReparenter(nil, nil, logger)
- tabletList, err := erp.filterValidCandidates(tt.validTablets, tt.tabletsReachable, tt.prevPrimary, tt.opts)
+ tabletList, err := erp.filterValidCandidates(tt.validTablets, tt.tabletsReachable, tt.tabletsTakingBackup, tt.prevPrimary, tt.opts)
if tt.errShouldContain != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.errShouldContain)
diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go
index fc0e1c80a06..4f1579a07b9 100644
--- a/go/vt/vtctl/reparentutil/planned_reparenter.go
+++ b/go/vt/vtctl/reparentutil/planned_reparenter.go
@@ -50,9 +50,10 @@ type PlannedReparenter struct {
// operations. Options are passed by value, so it is safe for callers to mutate
// resue options structs for multiple calls.
type PlannedReparentOptions struct {
- NewPrimaryAlias *topodatapb.TabletAlias
- AvoidPrimaryAlias *topodatapb.TabletAlias
- WaitReplicasTimeout time.Duration
+ NewPrimaryAlias *topodatapb.TabletAlias
+ AvoidPrimaryAlias *topodatapb.TabletAlias
+ ExpectedPrimaryAlias *topodatapb.TabletAlias
+ WaitReplicasTimeout time.Duration
// Private options managed internally. We use value-passing semantics to
// set these options inside a PlannedReparent without leaking these details
@@ -524,6 +525,13 @@ func (pr *PlannedReparenter) reparentShardLocked(
return err
}
+ if opts.ExpectedPrimaryAlias != nil && !topoproto.TabletAliasEqual(opts.ExpectedPrimaryAlias, shardInfo.PrimaryAlias) {
+ return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %s is not equal to expected alias %s",
+ topoproto.TabletAliasString(shardInfo.PrimaryAlias),
+ topoproto.TabletAliasString(opts.ExpectedPrimaryAlias),
+ )
+ }
+
keyspaceDurability, err := pr.ts.GetKeyspaceDurability(ctx, keyspace)
if err != nil {
return err
diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go
index 5c79caeadb7..20804a0b625 100644
--- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go
+++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go
@@ -2616,6 +2616,7 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) {
ts *topo.Server
tmc tmclient.TabletManagerClient
tablets []*topodatapb.Tablet
+ shards []*vtctldatapb.Shard
unlockTopo bool
ev *events.Reparent
@@ -2623,8 +2624,9 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) {
shard string
opts PlannedReparentOptions
- shouldErr bool
- expectedEvent *events.Reparent
+ shouldErr bool
+ errShouldContain string
+ expectedEvent *events.Reparent
}{
{
name: "success: current primary cannot be determined", // "Case (1)"
@@ -3274,6 +3276,58 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) {
},
},
},
+ {
+ name: "expected primary mismatch",
+ ts: memorytopo.NewServer("zone1"),
+ tmc: &testutil.TabletManagerClient{},
+ shards: []*vtctldatapb.Shard{
+ {
+ Keyspace: "testkeyspace",
+ Name: "-",
+ Shard: &topodatapb.Shard{
+ IsPrimaryServing: true,
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ },
+ },
+ },
+ tablets: []*topodatapb.Tablet{
+ {
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ Keyspace: "testkeyspace",
+ Shard: "-",
+ },
+ {
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 200,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ Keyspace: "testkeyspace",
+ Shard: "-",
+ },
+ },
+
+ ev: &events.Reparent{},
+ keyspace: "testkeyspace",
+ shard: "-",
+ opts: PlannedReparentOptions{
+ // This is not the shard primary, so it should cause an error.
+ ExpectedPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 200,
+ },
+ },
+ shouldErr: true,
+ errShouldContain: "primary zone1-0000000100 is not equal to expected alias zone1-0000000200",
+ expectedEvent: nil,
+ },
}
ctx := context.Background()
@@ -3290,9 +3344,13 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) {
testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{
AlsoSetShardPrimary: true,
ForceSetShardPrimary: true, // Some of our test cases count on having multiple primaries, so let the last one "win".
- SkipShardCreation: false,
+ SkipShardCreation: len(tt.shards) > 0,
}, tt.tablets...)
+ if len(tt.shards) > 0 {
+ testutil.AddShards(ctx, t, tt.ts, tt.shards...)
+ }
+
if !tt.unlockTopo {
lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for testing")
require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard)
@@ -3316,7 +3374,7 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) {
err := pr.reparentShardLocked(ctx, tt.ev, tt.keyspace, tt.shard, tt.opts)
if tt.shouldErr {
assert.Error(t, err)
-
+ assert.ErrorContains(t, err, tt.errShouldContain)
return
}
diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go
index 8c905038bd5..ff712f82ce2 100644
--- a/go/vt/vtctl/reparentutil/replication.go
+++ b/go/vt/vtctl/reparentutil/replication.go
@@ -28,6 +28,8 @@ import (
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
+ replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -35,9 +37,6 @@ import (
"vitess.io/vitess/go/vt/topotools/events"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tmclient"
-
- replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
// FindValidEmergencyReparentCandidates will find candidates for an emergency
@@ -201,9 +200,10 @@ func SetReplicationSource(ctx context.Context, ts *topo.Server, tmc tmclient.Tab
// replicationSnapshot stores the status maps and the tablets that were reachable
// when trying to stopReplicationAndBuildStatusMaps.
type replicationSnapshot struct {
- statusMap map[string]*replicationdatapb.StopReplicationStatus
- primaryStatusMap map[string]*replicationdatapb.PrimaryStatus
- reachableTablets []*topodatapb.Tablet
+ statusMap map[string]*replicationdatapb.StopReplicationStatus
+ primaryStatusMap map[string]*replicationdatapb.PrimaryStatus
+ reachableTablets []*topodatapb.Tablet
+ tabletsBackupState map[string]bool
}
// stopReplicationAndBuildStatusMaps stops replication on all replicas, then
@@ -215,7 +215,7 @@ func stopReplicationAndBuildStatusMaps(
tmc tmclient.TabletManagerClient,
ev *events.Reparent,
tabletMap map[string]*topo.TabletInfo,
- waitReplicasTimeout time.Duration,
+ stopReplicationTimeout time.Duration,
ignoredTablets sets.String,
tabletToWaitFor *topodatapb.TabletAlias,
durability Durabler,
@@ -228,13 +228,14 @@ func stopReplicationAndBuildStatusMaps(
errChan = make(chan concurrency.Error)
allTablets []*topodatapb.Tablet
res = &replicationSnapshot{
- statusMap: map[string]*replicationdatapb.StopReplicationStatus{},
- primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
- reachableTablets: []*topodatapb.Tablet{},
+ statusMap: map[string]*replicationdatapb.StopReplicationStatus{},
+ primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
+ reachableTablets: []*topodatapb.Tablet{},
+ tabletsBackupState: map[string]bool{},
}
)
- groupCtx, groupCancel := context.WithTimeout(ctx, waitReplicasTimeout)
+ groupCtx, groupCancel := context.WithTimeout(ctx, stopReplicationTimeout)
defer groupCancel()
fillStatus := func(alias string, tabletInfo *topo.TabletInfo, mustWaitForTablet bool) {
@@ -272,6 +273,20 @@ func stopReplicationAndBuildStatusMaps(
err = vterrors.Wrapf(err, "error when getting replication status for alias %v: %v", alias, err)
}
} else {
+ isTakingBackup := false
+
+ // Prefer the most up-to-date information regarding whether the tablet is taking a backup from the After
+ // replication status, but fall back to the Before status if After is nil.
+ if stopReplicationStatus.After != nil {
+ isTakingBackup = stopReplicationStatus.After.BackupRunning
+ } else if stopReplicationStatus.Before != nil {
+ isTakingBackup = stopReplicationStatus.Before.BackupRunning
+ }
+
+ m.Lock()
+ res.tabletsBackupState[alias] = isTakingBackup
+ m.Unlock()
+
var sqlThreadRunning bool
// Check if the sql thread was running for the tablet
sqlThreadRunning, err = SQLThreadWasRunning(stopReplicationStatus)
@@ -312,8 +327,9 @@ func stopReplicationAndBuildStatusMaps(
errgroup := concurrency.ErrorGroup{
NumGoroutines: len(tabletMap) - ignoredTablets.Len(),
NumRequiredSuccesses: len(tabletMap) - ignoredTablets.Len() - 1,
- NumAllowedErrors: 1,
- NumErrorsToWaitFor: numErrorsToWaitFor,
+ NumAllowedErrors: len(tabletMap), // We set the number of allowed errors to a very high value, because we don't want to exit early
+ // even in case of multiple failures. We rely on the revoke function below to determine if we have more failures than we can tolerate
+ NumErrorsToWaitFor: numErrorsToWaitFor,
}
errRecorder := errgroup.Wait(groupCancel, errChan)
diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go
index 42b01cac770..01f043ac827 100644
--- a/go/vt/vtctl/reparentutil/replication_test.go
+++ b/go/vt/vtctl/reparentutil/replication_test.go
@@ -18,27 +18,33 @@ package reparentutil
import (
"context"
+ "os"
"testing"
"time"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "vitess.io/vitess/go/vt/vterrors"
-
- "github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
+ _flag "vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/topotools/events"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tmclient"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
+func TestMain(m *testing.M) {
+ _flag.ParseFlagsForTest()
+ os.Exit(m.Run())
+}
+
func TestFindValidEmergencyReparentCandidates(t *testing.T) {
t.Parallel()
@@ -278,7 +284,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
durability string
tmc *stopReplicationAndBuildStatusMapsTestTMClient
tabletMap map[string]*topo.TabletInfo
- waitReplicasTimeout time.Duration
+ stopReplicasTimeout time.Duration
ignoredTablets sets.String
tabletToWaitFor *topodatapb.TabletAlias
expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus
@@ -796,7 +802,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
shouldErr: true, // we get multiple errors, so we fail
},
{
- name: "waitReplicasTimeout exceeded",
+ name: "stopReplicasTimeout exceeded",
durability: "none",
tmc: &stopReplicationAndBuildStatusMapsTestTMClient{
stopReplicationAndGetStatusDelays: map[string]time.Duration{
@@ -840,7 +846,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
},
},
},
- waitReplicasTimeout: time.Millisecond * 5,
+ stopReplicasTimeout: time.Millisecond * 5,
ignoredTablets: sets.NewString(),
expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{
"zone1-0000000101": {
@@ -1098,7 +1104,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
Uid: 102,
},
}},
- waitReplicasTimeout: time.Minute,
+ stopReplicasTimeout: time.Minute,
expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{},
shouldErr: false,
},
@@ -1110,7 +1116,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
durability, err := GetDurabilityPolicy(tt.durability)
require.NoError(t, err)
- res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.waitReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, logger)
+ res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, logger)
if tt.shouldErr {
assert.Error(t, err)
return
diff --git a/go/vt/vtctl/reparentutil/slack_cross_cell_shim.go b/go/vt/vtctl/reparentutil/slack_cross_cell_shim.go
new file mode 100644
index 00000000000..64d0cb8eda0
--- /dev/null
+++ b/go/vt/vtctl/reparentutil/slack_cross_cell_shim.go
@@ -0,0 +1,9 @@
+package reparentutil
+
+import "github.com/slackhq/vitess-addons/go/durability"
+
+func init() {
+ RegisterDurability("slack_cross_cell", func() Durabler {
+ return &durability.SlackCrossCell{}
+ })
+}
diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go
index f4cebc3dd7d..f48a283ce96 100644
--- a/go/vt/vtctl/reparentutil/util.go
+++ b/go/vt/vtctl/reparentutil/util.go
@@ -89,12 +89,16 @@ func ChooseNewPrimary(
go func(tablet *topodatapb.Tablet) {
defer wg.Done()
// find and store the positions for the tablet
- pos, err := findPositionForTablet(ctx, tablet, logger, tmc, waitReplicasTimeout)
+ pos, takingBackup, err := findPositionForTablet(ctx, tablet, logger, tmc, waitReplicasTimeout)
mu.Lock()
defer mu.Unlock()
if err == nil {
- validTablets = append(validTablets, tablet)
- tabletPositions = append(tabletPositions, pos)
+ if takingBackup {
+ log.Infof("%v is taking a backup", topoproto.TabletAliasString(tablet.Alias))
+ } else {
+ validTablets = append(validTablets, tablet)
+ tabletPositions = append(tabletPositions, pos)
+ }
}
}(tablet.Tablet)
}
@@ -106,7 +110,7 @@ func ChooseNewPrimary(
return nil, nil
}
- // sort the tablets for finding the best primary
+ // sort preferred tablets for finding the best primary
err := sortTabletsForReparent(validTablets, tabletPositions, durability)
if err != nil {
return nil, err
@@ -117,7 +121,7 @@ func ChooseNewPrimary(
// findPositionForTablet processes the replication position for a single tablet and
// returns it. It is safe to call from multiple goroutines.
-func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (mysql.Position, error) {
+func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (mysql.Position, bool, error) {
logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias))
ctx, cancel := context.WithTimeout(ctx, waitTimeout)
@@ -128,10 +132,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge
sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError)
if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNotReplica {
logger.Warningf("no replication statue from %v, using empty gtid set", topoproto.TabletAliasString(tablet.Alias))
- return mysql.Position{}, nil
+ return mysql.Position{}, false, nil
}
logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err)
- return mysql.Position{}, err
+ return mysql.Position{}, false, err
}
// Use the relay log position if available, otherwise use the executed GTID set (binary log position).
@@ -142,10 +146,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge
pos, err := mysql.DecodePosition(positionString)
if err != nil {
logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", positionString, topoproto.TabletAliasString(tablet.Alias), err)
- return mysql.Position{}, err
+ return mysql.Position{}, false, err
}
- return pos, nil
+ return pos, status.BackupRunning, nil
}
// FindCurrentPrimary returns the current primary tablet of a shard, if any. The
diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go
index 29f7bb4ab7d..fac0dba11fc 100644
--- a/go/vt/vtctl/reparentutil/util_test.go
+++ b/go/vt/vtctl/reparentutil/util_test.go
@@ -132,6 +132,107 @@ func TestChooseNewPrimary(t *testing.T) {
shouldErr: false,
},
{
+ name: "Two good replicas, but one of them is taking a backup so we pick the other one",
+ tmc: &chooseNewPrimaryTestTMClient{
+ // both zone1-101 and zone1-102 are equivalent from a replication PoV, but zone1-101 is taking a backup
+ replicationStatuses: map[string]*replicationdatapb.Status{
+ "zone1-0000000101": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: true,
+ },
+ "zone1-0000000102": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: false,
+ },
+ },
+ },
+ shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ }, nil),
+ tabletMap: map[string]*topo.TabletInfo{
+ "primary": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "replica1": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ "replica2": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 102,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ },
+ avoidPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 0,
+ },
+ expected: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 102,
+ },
+ },
+ {
+ name: "Only one replica, but it's taking a backup. We don't elect it.",
+ tmc: &chooseNewPrimaryTestTMClient{
+ // zone1-101 is the only available replica, but it is taking a backup, so it cannot be elected
+ replicationStatuses: map[string]*replicationdatapb.Status{
+ "zone1-0000000101": {
+ Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ BackupRunning: true,
+ },
+ },
+ },
+ shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{
+ PrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ }, nil),
+ tabletMap: map[string]*topo.TabletInfo{
+ "primary": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ Type: topodatapb.TabletType_PRIMARY,
+ },
+ },
+ "replica1": {
+ Tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 101,
+ },
+ Type: topodatapb.TabletType_REPLICA,
+ },
+ },
+ },
+ avoidPrimaryAlias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 0,
+ },
+ expected: nil,
+ }, {
name: "found a replica - more advanced relay log position",
tmc: &chooseNewPrimaryTestTMClient{
// zone1-101 is behind zone1-102
@@ -459,11 +560,12 @@ func TestFindPositionForTablet(t *testing.T) {
ctx := context.Background()
logger := logutil.NewMemoryLogger()
tests := []struct {
- name string
- tmc *testutil.TabletManagerClient
- tablet *topodatapb.Tablet
- expectedPosition string
- expectedErr string
+ name string
+ tmc *testutil.TabletManagerClient
+ tablet *topodatapb.Tablet
+ expectedPosition string
+ expectedErr string
+ expectedTakingBackup bool
}{
{
name: "executed gtid set",
@@ -486,6 +588,30 @@ func TestFindPositionForTablet(t *testing.T) {
},
},
expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
+ }, {
+ name: "Host is taking a backup",
+ tmc: &testutil.TabletManagerClient{
+ ReplicationStatusResults: map[string]struct {
+ Position *replicationdatapb.Status
+ Error error
+ }{
+ "zone1-0000000100": {
+ Position: &replicationdatapb.Status{
+ Position: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
+ ReplicationLagSeconds: 201,
+ },
+ },
+ },
+ TabletsBackupState: map[string]bool{"zone1-0000000100": true},
+ },
+ tablet: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 100,
+ },
+ },
+ expectedTakingBackup: true,
+ expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
}, {
name: "no replication status",
tmc: &testutil.TabletManagerClient{
@@ -553,7 +679,7 @@ func TestFindPositionForTablet(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- pos, err := findPositionForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second)
+ pos, takingBackup, err := findPositionForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second)
if test.expectedErr != "" {
require.EqualError(t, err, test.expectedErr)
return
@@ -561,6 +687,7 @@ func TestFindPositionForTablet(t *testing.T) {
require.NoError(t, err)
posString := mysql.EncodePosition(pos)
require.Equal(t, test.expectedPosition, posString)
+ require.Equal(t, test.expectedTakingBackup, takingBackup)
})
}
}
diff --git a/go/vt/vtctl/topo.go b/go/vt/vtctl/topo.go
index 308d1f305f9..ed2d91e9cf2 100644
--- a/go/vt/vtctl/topo.go
+++ b/go/vt/vtctl/topo.go
@@ -21,18 +21,11 @@ import (
"encoding/json"
"fmt"
"os"
- "path"
"github.com/spf13/pflag"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/proto"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/wrangler"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vschemapb "vitess.io/vitess/go/vt/proto/vschema"
)
// This file contains the topo command group for vtctl.
@@ -57,59 +50,6 @@ func init() {
})
}
-// DecodeContent uses the filename to imply a type, and proto-decodes
-// the right object, then echoes it as a string.
-func DecodeContent(filename string, data []byte, json bool) (string, error) {
- name := path.Base(filename)
- dir := path.Dir(filename)
- var p proto.Message
- switch name {
- case topo.CellInfoFile:
- p = new(topodatapb.CellInfo)
- case topo.KeyspaceFile:
- p = new(topodatapb.Keyspace)
- case topo.ShardFile:
- p = new(topodatapb.Shard)
- case topo.VSchemaFile:
- p = new(vschemapb.Keyspace)
- case topo.ShardReplicationFile:
- p = new(topodatapb.ShardReplication)
- case topo.TabletFile:
- p = new(topodatapb.Tablet)
- case topo.SrvVSchemaFile:
- p = new(vschemapb.SrvVSchema)
- case topo.SrvKeyspaceFile:
- p = new(topodatapb.SrvKeyspace)
- case topo.RoutingRulesFile:
- p = new(vschemapb.RoutingRules)
- default:
- switch dir {
- case "/" + topo.GetExternalVitessClusterDir():
- p = new(topodatapb.ExternalVitessCluster)
- default:
- }
- if p == nil {
- if json {
- return "", fmt.Errorf("unknown topo protobuf type for %v", name)
- }
- return string(data), nil
- }
- }
-
- if err := proto.Unmarshal(data, p); err != nil {
- return string(data), err
- }
-
- var marshalled []byte
- var err error
- if json {
- marshalled, err = protojson.Marshal(p)
- } else {
- marshalled, err = prototext.Marshal(p)
- }
- return string(marshalled), err
-}
-
func commandTopoCat(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error {
cell := subFlags.String("cell", topo.GlobalCell, "topology cell to cat the file from. Defaults to global cell.")
long := subFlags.Bool("long", false, "long listing.")
@@ -214,7 +154,7 @@ func (d ProtoTopologyDecoder) decode(ctx context.Context, topoPaths []string, co
wr.Logger().Printf("path=%v version=%v\n", topoPath, version)
}
- decoded, err := DecodeContent(topoPath, data, false)
+ decoded, err := topo.DecodeContent(topoPath, data, false)
if err != nil {
wr.Logger().Warningf("TopoCat: cannot proto decode %v: %v", topoPath, err)
decoded = string(data)
@@ -269,7 +209,7 @@ func (d JSONTopologyDecoder) decode(ctx context.Context, topoPaths []string, con
continue
}
- decoded, err := DecodeContent(topoPath, data, true)
+ decoded, err := topo.DecodeContent(topoPath, data, true)
if err != nil {
hasError = true
wr.Logger().Printf("TopoCat: cannot proto decode %v: %v", topoPath, err)
diff --git a/go/vt/vtctl/vdiff2.go b/go/vt/vtctl/vdiff2.go
index e97b56edb68..83aa15ef763 100644
--- a/go/vt/vtctl/vdiff2.go
+++ b/go/vt/vtctl/vdiff2.go
@@ -577,7 +577,7 @@ func buildVDiff2SingleSummary(wr *wrangler.Wrangler, keyspace, workflow, uuid st
// on every shard.
if shardStateCounts[vdiff.StoppedState] > 0 {
summary.State = vdiff.StoppedState
- } else if tableStateCounts[vdiff.ErrorState] > 0 {
+ } else if shardStateCounts[vdiff.ErrorState] > 0 || tableStateCounts[vdiff.ErrorState] > 0 {
summary.State = vdiff.ErrorState
} else if tableStateCounts[vdiff.StartedState] > 0 {
summary.State = vdiff.StartedState
diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go
index 143740267a5..fa508dea79a 100644
--- a/go/vt/vtctl/vtctl.go
+++ b/go/vt/vtctl/vtctl.go
@@ -108,6 +108,12 @@ import (
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/proto/vttime"
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo"
@@ -116,13 +122,6 @@ import (
"vitess.io/vitess/go/vt/vtctl/workflow"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/wrangler"
-
- tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vschemapb "vitess.io/vitess/go/vt/proto/vschema"
- vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/proto/vttime"
)
var (
@@ -2715,7 +2714,8 @@ func commandSwitchWrites(ctx context.Context, wr *wrangler.Wrangler, subFlags *p
wr.Logger().Printf("*** SwitchWrites is deprecated. Consider using v2 commands instead, see https://vitess.io/docs/reference/vreplication/v2/ ***\n")
timeout := subFlags.Duration("timeout", 30*time.Second, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout.")
- filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "DEPRECATED Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout.")
+ filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout.")
+ _ = subFlags.MarkDeprecated("filtered_replication_wait_time", "Use --timeout instead.")
reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication")
cancel := subFlags.Bool("cancel", false, "Cancel the failed migration and serve from source")
reverse := subFlags.Bool("reverse", false, "Reverse a previous SwitchWrites serve from source")
diff --git a/go/vt/vtctl/workflow/state.go b/go/vt/vtctl/workflow/state.go
index 2841cd98a1a..613f82d0b43 100644
--- a/go/vt/vtctl/workflow/state.go
+++ b/go/vt/vtctl/workflow/state.go
@@ -41,5 +41,7 @@ type State struct {
WritesSwitched bool
// Partial MoveTables info
- WritesPartiallySwitched bool
+ IsPartialMigration bool
+ ShardsAlreadySwitched []string
+ ShardsNotYetSwitched []string
}
diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go
index 38eb2785d0d..9607e16d8bf 100644
--- a/go/vt/vtctld/api_test.go
+++ b/go/vt/vtctld/api_test.go
@@ -26,6 +26,8 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/wrangler"
@@ -462,29 +464,19 @@ func TestAPI(t *testing.T) {
switch in.method {
case "GET":
resp, err = http.Get(server.URL + apiPrefix + in.path)
+ require.NoError(t, err)
+ defer resp.Body.Close()
case "POST":
resp, err = http.Post(server.URL+apiPrefix+in.path, "application/json", strings.NewReader(in.body))
+ require.NoError(t, err)
+ defer resp.Body.Close()
default:
t.Fatalf("[%v] unknown method: %v", in.path, in.method)
- return
- }
-
- if err != nil {
- t.Fatalf("[%v] http error: %v", in.path, err)
- return
}
body, err := io.ReadAll(resp.Body)
- resp.Body.Close()
-
- if err != nil {
- t.Fatalf("[%v] io.ReadAll(resp.Body) error: %v", in.path, err)
- return
- }
-
- if resp.StatusCode != in.statusCode {
- t.Fatalf("[%v] got unexpected status code %d, want %d", in.path, resp.StatusCode, in.statusCode)
- }
+ require.NoError(t, err)
+ require.Equal(t, in.statusCode, resp.StatusCode)
got := compactJSON(body)
want := compactJSON([]byte(in.want))
diff --git a/go/vt/vtctld/explorer.go b/go/vt/vtctld/explorer.go
index 7c05c4e2fc3..1d9841e1536 100644
--- a/go/vt/vtctld/explorer.go
+++ b/go/vt/vtctld/explorer.go
@@ -28,7 +28,6 @@ import (
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/vtctl"
)
// backendExplorer is a class that uses the Backend interface of a
@@ -89,7 +88,7 @@ func (ex *backendExplorer) HandlePath(nodePath string, r *http.Request) *Result
case nil:
if len(data) > 0 {
// It has contents, we just use it if possible.
- decoded, err := vtctl.DecodeContent(relativePath, data, false)
+ decoded, err := topo.DecodeContent(relativePath, data, false)
if err != nil {
result.Error = err.Error()
} else {
diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go
index 40973864ddc..462ffdd239f 100644
--- a/go/vt/vtctld/vtctld.go
+++ b/go/vt/vtctld/vtctld.go
@@ -60,11 +60,15 @@ func init() {
func registerVtctldFlags(fs *pflag.FlagSet) {
fs.BoolVar(&enableRealtimeStats, "enable_realtime_stats", enableRealtimeStats, "Required for the Realtime Stats view. If set, vtctld will maintain a streaming RPC to each tablet (in all cells) to gather the realtime health stats.")
+ fs.MarkDeprecated("enable_realtime_stats", "it is only used by the old vtctld UI, which is deprecated.")
fs.BoolVar(&enableUI, "enable_vtctld_ui", enableUI, "If true, the vtctld web interface will be enabled. Default is true.")
+ fs.MarkDeprecated("enable_vtctld_ui", "it is only used by the old vtctld UI, which is deprecated.")
fs.StringVar(&durabilityPolicy, "durability_policy", durabilityPolicy, "type of durability to enforce. Default is none. Other values are dictated by registered plugins")
fs.BoolVar(&sanitizeLogMessages, "vtctld_sanitize_log_messages", sanitizeLogMessages, "When true, vtctld sanitizes logging.")
fs.StringVar(&webDir, "web_dir", webDir, "NOT USED, here for backward compatibility")
+ fs.MarkDeprecated("web_dir", "it will be removed in a future release.")
fs.StringVar(&webDir2, "web_dir2", webDir2, "NOT USED, here for backward compatibility")
+ fs.MarkDeprecated("web_dir2", "it will be removed in a future release.")
}
// InitVtctld initializes all the vtctld functionality.
@@ -159,7 +163,7 @@ func InitVtctld(ts *topo.Server) error {
if err != nil {
log.Errorf("Failed to get the list of known cells, failed to instantiate the healthcheck at startup: %v", err)
} else {
- healthCheck = discovery.NewHealthCheck(ctx, *vtctl.HealthcheckRetryDelay, *vtctl.HealthCheckTimeout, ts, localCell, strings.Join(cells, ","))
+ healthCheck = discovery.NewHealthCheck(ctx, *vtctl.HealthcheckRetryDelay, *vtctl.HealthCheckTimeout, ts, localCell, strings.Join(cells, ","), nil)
}
}
diff --git a/go/vt/vtctld/workflow.go b/go/vt/vtctld/workflow.go
index bafc6ad8672..ef9b4f3b1c0 100644
--- a/go/vt/vtctld/workflow.go
+++ b/go/vt/vtctld/workflow.go
@@ -41,8 +41,11 @@ var (
func registerVtctldWorkflowFlags(fs *pflag.FlagSet) {
fs.BoolVar(&workflowManagerInit, "workflow_manager_init", workflowManagerInit, "Initialize the workflow manager in this vtctld instance.")
+ fs.MarkDeprecated("workflow_manager_init", "it will be removed in a future release.")
fs.BoolVar(&workflowManagerUseElection, "workflow_manager_use_election", workflowManagerUseElection, "if specified, will use a topology server-based master election to ensure only one workflow manager is active at a time.")
+ fs.MarkDeprecated("workflow_manager_use_election", "it will be removed in a future release.")
fs.StringSliceVar(&workflowManagerDisable, "workflow_manager_disable", workflowManagerDisable, "comma separated list of workflow types to disable")
+ fs.MarkDeprecated("workflow_manager_disable", "it will be removed in a future release.")
}
func init() {
diff --git a/go/vt/vterrors/last_error.go b/go/vt/vterrors/last_error.go
new file mode 100644
index 00000000000..1f051825041
--- /dev/null
+++ b/go/vt/vterrors/last_error.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vterrors
+
+import (
+ "sync"
+ "time"
+
+ "vitess.io/vitess/go/vt/log"
+)
+
+/*
+ * LastError tracks the most recent error for any ongoing process and how long it has persisted.
+ * The err field should be a vterror to ensure we have meaningful error codes, causes, stack
+ * traces, etc.
+ */
+type LastError struct {
+ name string
+ err error
+ firstSeen time.Time
+ lastSeen time.Time
+ mu sync.Mutex
+ maxTimeInError time.Duration // if error persists for this long, shouldRetry() will return false
+}
+
+func NewLastError(name string, maxTimeInError time.Duration) *LastError {
+ return &LastError{
+ name: name,
+ maxTimeInError: maxTimeInError,
+ }
+}
+
+func (le *LastError) Record(err error) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ if err == nil {
+ le.err = nil
+ le.firstSeen = time.Time{}
+ le.lastSeen = time.Time{}
+ return
+ }
+ if !Equals(err, le.err) {
+ le.firstSeen = time.Now()
+ le.lastSeen = time.Now()
+ le.err = err
+ } else {
+ // same error seen
+ if time.Since(le.lastSeen) > le.maxTimeInError {
+ // reset firstSeen, since it has been long enough since the last time we saw this error
+ log.Infof("Resetting firstSeen for %s, since it is too long since the last one", le.name)
+ le.firstSeen = time.Now()
+ }
+ le.lastSeen = time.Now()
+ }
+}
+
+func (le *LastError) ShouldRetry() bool {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ if le.maxTimeInError == 0 {
+ // The value of 0 means "no time limit"
+ return true
+ }
+ if le.firstSeen.IsZero() {
+ return true
+ }
+ if time.Since(le.firstSeen) <= le.maxTimeInError {
+ // within the max time range
+ return true
+ }
+ log.Errorf("%s: the same error was encountered continuously since %s, it is now assumed to be unrecoverable; any affected operations will need to be manually restarted once error '%s' has been addressed",
+ le.name, le.firstSeen.UTC(), le.err)
+ return false
+}
diff --git a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt
index 8bb3ecef970..0adc5661077 100644
--- a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt
+++ b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt
@@ -45,3 +45,8 @@ select ID from t1
1 ks_unsharded/-: select ID from t1 limit 10001
----------------------------------------------------------------------
+select t1.id, t2.c2 from t1 join t2 on t1.id = t2.t1_id where t2.c2 in (1)
+
+1 ks_unsharded/-: select t1.id, t2.c2 from t1 join t2 on t1.id = t2.t1_id where t2.c2 in (1) limit 10001
+
+----------------------------------------------------------------------
diff --git a/go/vt/vtexplain/testdata/test-schema.sql b/go/vt/vtexplain/testdata/test-schema.sql
index 5b65334796f..d5f9fbad56f 100644
--- a/go/vt/vtexplain/testdata/test-schema.sql
+++ b/go/vt/vtexplain/testdata/test-schema.sql
@@ -4,6 +4,12 @@ create table t1 (
floatval float not null default 0,
primary key (id)
);
+create table t2 (
+ id bigint(20) unsigned not null,
+ t1_id bigint(20) unsigned not null default 0,
+ c2 bigint(20) null,
+ primary key (id)
+);
create table user (
id bigint,
diff --git a/go/vt/vtexplain/testdata/test-vschema.json b/go/vt/vtexplain/testdata/test-vschema.json
index a50e11e92ae..f4350efa56d 100644
--- a/go/vt/vtexplain/testdata/test-vschema.json
+++ b/go/vt/vtexplain/testdata/test-vschema.json
@@ -3,6 +3,7 @@
"sharded": false,
"tables": {
"t1": {},
+ "t2": {},
"table_not_in_schema": {}
}
},
diff --git a/go/vt/vtexplain/testdata/unsharded-queries.sql b/go/vt/vtexplain/testdata/unsharded-queries.sql
index f0147ac5d6e..712245f3338 100644
--- a/go/vt/vtexplain/testdata/unsharded-queries.sql
+++ b/go/vt/vtexplain/testdata/unsharded-queries.sql
@@ -6,3 +6,4 @@ update t1 set floatval = 9.99;
delete from t1 where id = 100;
insert into t1 (id,intval,floatval) values (1,2,3.14) on duplicate key update intval=3, floatval=3.14;
select ID from t1;
+select t1.id, t2.c2 from t1 join t2 on t1.id = t2.t1_id where t2.c2 in (1);
\ No newline at end of file
diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go
index 21fc30cbd4f..8145c59b44d 100644
--- a/go/vt/vtexplain/vtexplain_test.go
+++ b/go/vt/vtexplain/vtexplain_test.go
@@ -283,14 +283,14 @@ func TestJSONOutput(t *testing.T) {
}
}
-func testShardInfo(ks, start, end string, t *testing.T) *topo.ShardInfo {
+func testShardInfo(ks, start, end string, primaryServing bool, t *testing.T) *topo.ShardInfo {
kr, err := key.ParseKeyRangeParts(start, end)
require.NoError(t, err)
return topo.NewShardInfo(
ks,
fmt.Sprintf("%s-%s", start, end),
- &topodata.Shard{KeyRange: kr},
+ &topodata.Shard{KeyRange: kr, IsPrimaryServing: primaryServing},
&vtexplainTestTopoVersion{},
)
}
@@ -304,14 +304,17 @@ func TestUsingKeyspaceShardMap(t *testing.T) {
testcase: "select-sharded-8",
ShardRangeMap: map[string]map[string]*topo.ShardInfo{
"ks_sharded": {
- "-20": testShardInfo("ks_sharded", "", "20", t),
- "20-40": testShardInfo("ks_sharded", "20", "40", t),
- "40-60": testShardInfo("ks_sharded", "40", "60", t),
- "60-80": testShardInfo("ks_sharded", "60", "80", t),
- "80-a0": testShardInfo("ks_sharded", "80", "a0", t),
- "a0-c0": testShardInfo("ks_sharded", "a0", "c0", t),
- "c0-e0": testShardInfo("ks_sharded", "c0", "e0", t),
- "e0-": testShardInfo("ks_sharded", "e0", "", t),
+ "-20": testShardInfo("ks_sharded", "", "20", true, t),
+ "20-40": testShardInfo("ks_sharded", "20", "40", true, t),
+ "40-60": testShardInfo("ks_sharded", "40", "60", true, t),
+ "60-80": testShardInfo("ks_sharded", "60", "80", true, t),
+ "80-a0": testShardInfo("ks_sharded", "80", "a0", true, t),
+ "a0-c0": testShardInfo("ks_sharded", "a0", "c0", true, t),
+ "c0-e0": testShardInfo("ks_sharded", "c0", "e0", true, t),
+ "e0-": testShardInfo("ks_sharded", "e0", "", true, t),
+ // Some non-serving shards below - these should never be in the output of vtexplain
+ "-80": testShardInfo("ks_sharded", "", "80", false, t),
+ "80-": testShardInfo("ks_sharded", "80", "", false, t),
},
},
},
@@ -321,11 +324,15 @@ func TestUsingKeyspaceShardMap(t *testing.T) {
// Have mercy on the poor soul that has this keyspace sharding.
// But, hey, vtexplain still works so they have that going for them.
"ks_sharded": {
- "-80": testShardInfo("ks_sharded", "", "80", t),
- "80-90": testShardInfo("ks_sharded", "80", "90", t),
- "90-a0": testShardInfo("ks_sharded", "90", "a0", t),
- "a0-e8": testShardInfo("ks_sharded", "a0", "e8", t),
- "e8-": testShardInfo("ks_sharded", "e8", "", t),
+ "-80": testShardInfo("ks_sharded", "", "80", true, t),
+ "80-90": testShardInfo("ks_sharded", "80", "90", true, t),
+ "90-a0": testShardInfo("ks_sharded", "90", "a0", true, t),
+ "a0-e8": testShardInfo("ks_sharded", "a0", "e8", true, t),
+ "e8-": testShardInfo("ks_sharded", "e8", "", true, t),
+ // Plus some un-even shards that are not serving and which should never be in the output of vtexplain
+ "80-a0": testShardInfo("ks_sharded", "80", "a0", false, t),
+ "a0-a5": testShardInfo("ks_sharded", "a0", "a5", false, t),
+ "a5-": testShardInfo("ks_sharded", "a5", "", false, t),
},
},
},
diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go
index 7c1aa2dfdb6..ff39e79c47e 100644
--- a/go/vt/vtexplain/vtexplain_vtgate.go
+++ b/go/vt/vtexplain/vtexplain_vtgate.go
@@ -72,7 +72,7 @@ func (vte *VTExplain) initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts
streamSize := 10
var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests
- vte.vtgateExecutor = vtgate.NewExecutor(context.Background(), vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, cache.DefaultConfig, schemaTracker, false, opts.PlannerVersion)
+ vte.vtgateExecutor = vtgate.NewExecutor(context.Background(), vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, cache.DefaultConfig, schemaTracker, false, opts.PlannerVersion, false)
queryLogBufferSize := 10
vtgate.QueryLogger = streamlog.New("VTGate", queryLogBufferSize)
@@ -131,6 +131,14 @@ func (vte *VTExplain) buildTopology(opts *Options, vschemaStr string, ksShardMap
vte.explainTopo.KeyspaceShards[ks] = make(map[string]*topodatapb.ShardReference)
for _, shard := range shards {
+ // If the topology is in the middle of a reshard, there can be two shards covering the same key range (e.g.
+ // both source shard 80- and target shard 80-c0 cover the keyrange 80-c0). For the purposes of explain, we
+ // should only consider the one that is serving, hence we skip the ones not serving. Otherwise, vtexplain
+ // gives inconsistent results - sometimes it will route the query being explained to the source shard, and
+ // sometimes to the destination shard. See https://github.com/vitessio/vitess/issues/11632 .
+ if shardInfo, ok := ksShardMap[ks][shard.Name]; ok && !shardInfo.IsPrimaryServing {
+ continue
+ }
hostname := fmt.Sprintf("%s/%s", ks, shard.Name)
log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name)
diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go
index ee94946e5c1..05932f785a0 100644
--- a/go/vt/vtexplain/vtexplain_vttablet.go
+++ b/go/vt/vtexplain/vtexplain_vttablet.go
@@ -504,199 +504,230 @@ func (t *explainTablet) HandleQuery(c *mysql.Conn, query string, callback func(*
}
switch sqlparser.Preview(query) {
case sqlparser.StmtSelect:
- // Parse the select statement to figure out the table and columns
- // that were referenced so that the synthetic response has the
- // expected field names and types.
- stmt, err := sqlparser.Parse(query)
+ var err error
+ result, err = t.handleSelect(query)
if err != nil {
return err
}
-
- var selStmt *sqlparser.Select
- switch stmt := stmt.(type) {
- case *sqlparser.Select:
- selStmt = stmt
- case *sqlparser.Union:
- selStmt = sqlparser.GetFirstSelect(stmt)
- default:
- return fmt.Errorf("vtexplain: unsupported statement type +%v", reflect.TypeOf(stmt))
+ case sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtSet,
+ sqlparser.StmtSavepoint, sqlparser.StmtSRollback, sqlparser.StmtRelease:
+ result = &sqltypes.Result{}
+ case sqlparser.StmtShow:
+ result = &sqltypes.Result{Fields: sqltypes.MakeTestFields("", "")}
+ case sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete:
+ result = &sqltypes.Result{
+ RowsAffected: 1,
}
+ default:
+ return fmt.Errorf("unsupported query %s", query)
+ }
- // Gen4 supports more complex queries so we now need to
- // handle multiple FROM clauses
- tables := make([]*sqlparser.AliasedTableExpr, len(selStmt.From))
- for _, from := range selStmt.From {
- tables = append(tables, getTables(from)...)
- }
+ return callback(result)
+}
- tableColumnMap := map[sqlparser.IdentifierCS]map[string]querypb.Type{}
- for _, table := range tables {
- if table == nil {
- continue
- }
+func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) {
+ // Parse the select statement to figure out the table and columns
+ // that were referenced so that the synthetic response has the
+ // expected field names and types.
+ stmt, err := sqlparser.Parse(query)
+ if err != nil {
+ return nil, err
+ }
- tableName := sqlparser.String(sqlparser.GetTableName(table.Expr))
- columns, exists := t.vte.getGlobalTabletEnv().tableColumns[tableName]
- if !exists && tableName != "" && tableName != "dual" {
- return fmt.Errorf("unable to resolve table name %s", tableName)
- }
+ var selStmt *sqlparser.Select
+ switch stmt := stmt.(type) {
+ case *sqlparser.Select:
+ selStmt = stmt
+ case *sqlparser.Union:
+ selStmt = sqlparser.GetFirstSelect(stmt)
+ default:
+ return nil, fmt.Errorf("vtexplain: unsupported statement type +%v", reflect.TypeOf(stmt))
+ }
- colTypeMap := map[string]querypb.Type{}
+ // Gen4 supports more complex queries so we now need to
+ // handle multiple FROM clauses
+ tables := make([]*sqlparser.AliasedTableExpr, len(selStmt.From))
+ for _, from := range selStmt.From {
+ tables = append(tables, getTables(from)...)
+ }
- if table.As.IsEmpty() {
- tableColumnMap[sqlparser.GetTableName(table.Expr)] = colTypeMap
- } else {
- tableColumnMap[table.As] = colTypeMap
- }
+ tableColumnMap := map[sqlparser.IdentifierCS]map[string]querypb.Type{}
+ for _, table := range tables {
+ if table == nil {
+ continue
+ }
- for k, v := range columns {
- if colType, exists := colTypeMap[k]; exists {
- if colType != v {
- return fmt.Errorf("column type mismatch for column : %s, types: %d vs %d", k, colType, v)
- }
- continue
- }
- colTypeMap[k] = v
- }
+ tableName := sqlparser.String(sqlparser.GetTableName(table.Expr))
+ columns, exists := t.vte.getGlobalTabletEnv().tableColumns[tableName]
+ if !exists && tableName != "" && tableName != "dual" {
+ return nil, fmt.Errorf("unable to resolve table name %s", tableName)
+ }
+
+ colTypeMap := map[string]querypb.Type{}
+ if table.As.IsEmpty() {
+ tableColumnMap[sqlparser.GetTableName(table.Expr)] = colTypeMap
+ } else {
+ tableColumnMap[table.As] = colTypeMap
}
- colNames := make([]string, 0, 4)
- colTypes := make([]querypb.Type, 0, 4)
- for _, node := range selStmt.SelectExprs {
- switch node := node.(type) {
- case *sqlparser.AliasedExpr:
- colNames, colTypes = inferColTypeFromExpr(node.Expr, tableColumnMap, colNames, colTypes)
- case *sqlparser.StarExpr:
- if node.TableName.Name.IsEmpty() {
- // SELECT *
- for _, colTypeMap := range tableColumnMap {
- for col, colType := range colTypeMap {
- colNames = append(colNames, col)
- colTypes = append(colTypes, colType)
- }
- }
- } else {
- // SELECT tableName.*
- colTypeMap := tableColumnMap[node.TableName.Name]
- for col, colType := range colTypeMap {
- colNames = append(colNames, col)
- colTypes = append(colTypes, colType)
- }
+ for k, v := range columns {
+ if colType, exists := colTypeMap[k]; exists {
+ if colType != v {
+ return nil, fmt.Errorf("column type mismatch for column : %s, types: %d vs %d", k, colType, v)
}
+ continue
}
+ colTypeMap[k] = v
}
- // the query against lookup table is in-query, handle it specifically
- var inColName string
- inVal := make([]sqltypes.Value, 0, 10)
-
- rowCount := 1
- if selStmt.Where != nil {
- switch v := selStmt.Where.Expr.(type) {
- case *sqlparser.ComparisonExpr:
- if v.Operator == sqlparser.InOp {
- switch c := v.Left.(type) {
- case *sqlparser.ColName:
- colName := strings.ToLower(c.Name.String())
- colType := tableColumnMap[sqlparser.GetTableName(selStmt.From[0].(*sqlparser.AliasedTableExpr).Expr)][colName]
-
- switch values := v.Right.(type) {
- case sqlparser.ValTuple:
- for _, val := range values {
- switch v := val.(type) {
- case *sqlparser.Literal:
- value, err := evalengine.LiteralToValue(v)
- if err != nil {
- return err
- }
-
- // Cast the value in the tuple to the expected value of the column
- castedValue, err := evalengine.Cast(value, colType)
- if err != nil {
- return err
- }
-
- // Check if we have a duplicate value
- isNewValue := true
- for _, v := range inVal {
- result, err := evalengine.NullsafeCompare(v, value, collations.Default())
- if err != nil {
- return err
- }
-
- if result == 0 {
- isNewValue = false
- break
- }
- }
-
- if isNewValue {
- inVal = append(inVal, castedValue)
- }
- }
- }
- rowCount = len(inVal)
- }
- inColName = strings.ToLower(c.Name.String())
- }
- }
- }
+ }
+
+ colNames, colTypes := t.analyzeExpressions(selStmt, tableColumnMap)
+
+ inColName, inVal, rowCount, s, err := t.analyzeWhere(selStmt, tableColumnMap)
+ if err != nil {
+ return s, err
+ }
+
+ fields := make([]*querypb.Field, len(colNames))
+ rows := make([][]sqltypes.Value, 0, rowCount)
+ for i, col := range colNames {
+ colType := colTypes[i]
+ fields[i] = &querypb.Field{
+ Name: col,
+ Type: colType,
}
+ }
- fields := make([]*querypb.Field, len(colNames))
- rows := make([][]sqltypes.Value, 0, rowCount)
+ for j := 0; j < rowCount; j++ {
+ values := make([]sqltypes.Value, len(colNames))
for i, col := range colNames {
+ // Generate a fake value for the given column. For the column in the IN clause,
+ // use the provided values in the query, For numeric types,
+ // use the column index. For all other types, just shortcut to using
+ // a string type that encodes the column name + index.
colType := colTypes[i]
- fields[i] = &querypb.Field{
- Name: col,
- Type: colType,
+ if len(inVal) > j && col == inColName {
+ values[i], _ = sqltypes.NewValue(querypb.Type_VARBINARY, inVal[j].Raw())
+ } else if sqltypes.IsIntegral(colType) {
+ values[i] = sqltypes.NewInt32(int32(i + 1))
+ } else if sqltypes.IsFloat(colType) {
+ values[i] = sqltypes.NewFloat64(1.0 + float64(i))
+ } else {
+ values[i] = sqltypes.NewVarChar(fmt.Sprintf("%s_val_%d", col, i+1))
}
}
+ rows = append(rows, values)
+ }
+ result := &sqltypes.Result{
+ Fields: fields,
+ InsertID: 0,
+ Rows: rows,
+ }
- for j := 0; j < rowCount; j++ {
- values := make([]sqltypes.Value, len(colNames))
- for i, col := range colNames {
- // Generate a fake value for the given column. For the column in the IN clause,
- // use the provided values in the query, For numeric types,
- // use the column index. For all other types, just shortcut to using
- // a string type that encodes the column name + index.
- colType := colTypes[i]
- if len(inVal) > j && col == inColName {
- values[i], _ = sqltypes.NewValue(querypb.Type_VARBINARY, inVal[j].Raw())
- } else if sqltypes.IsIntegral(colType) {
- values[i] = sqltypes.NewInt32(int32(i + 1))
- } else if sqltypes.IsFloat(colType) {
- values[i] = sqltypes.NewFloat64(1.0 + float64(i))
- } else {
- values[i] = sqltypes.NewVarChar(fmt.Sprintf("%s_val_%d", col, i+1))
- }
+ resultJSON, _ := json.MarshalIndent(result, "", " ")
+ log.V(100).Infof("query %s result %s\n", query, string(resultJSON))
+ return result, nil
+}
+
+func (t *explainTablet) analyzeWhere(selStmt *sqlparser.Select, tableColumnMap map[sqlparser.IdentifierCS]map[string]querypb.Type) (inColName string, inVal []sqltypes.Value, rowCount int, result *sqltypes.Result, err error) {
+ // the query against lookup table is in-query, handle it specifically
+ rowCount = 1
+ if selStmt.Where == nil {
+ return
+ }
+ v, ok := selStmt.Where.Expr.(*sqlparser.ComparisonExpr)
+ if !ok || v.Operator != sqlparser.InOp {
+ return
+ }
+ c, ok := v.Left.(*sqlparser.ColName)
+ if !ok {
+ return
+ }
+ colName := strings.ToLower(c.Name.String())
+ colType := querypb.Type_VARCHAR
+ tableExpr := selStmt.From[0]
+ expr, ok := tableExpr.(*sqlparser.AliasedTableExpr)
+ if ok {
+ m := tableColumnMap[sqlparser.GetTableName(expr.Expr)]
+ if m != nil {
+ t, found := m[colName]
+ if found {
+ colType = t
}
- rows = append(rows, values)
}
- result = &sqltypes.Result{
- Fields: fields,
- InsertID: 0,
- Rows: rows,
+ }
+
+ values, ok := v.Right.(sqlparser.ValTuple)
+ if !ok {
+ return
+ }
+ for _, val := range values {
+ lit, ok := val.(*sqlparser.Literal)
+ if !ok {
+ continue
+ }
+ value, err := evalengine.LiteralToValue(lit)
+ if err != nil {
+ return "", nil, 0, nil, err
}
- resultJSON, _ := json.MarshalIndent(result, "", " ")
- log.V(100).Infof("query %s result %s\n", query, string(resultJSON))
+ // Cast the value in the tuple to the expected value of the column
+ castedValue, err := evalengine.Cast(value, colType)
+ if err != nil {
+ return "", nil, 0, nil, err
+ }
- case sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtSet,
- sqlparser.StmtSavepoint, sqlparser.StmtSRollback, sqlparser.StmtRelease:
- result = &sqltypes.Result{}
- case sqlparser.StmtShow:
- result = &sqltypes.Result{Fields: sqltypes.MakeTestFields("", "")}
- case sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete:
- result = &sqltypes.Result{
- RowsAffected: 1,
+ // Check if we have a duplicate value
+ isNewValue := true
+ for _, v := range inVal {
+ result, err := evalengine.NullsafeCompare(v, value, collations.Default())
+ if err != nil {
+ return "", nil, 0, nil, err
+ }
+
+ if result == 0 {
+ isNewValue = false
+ break
+ }
+ }
+
+ if isNewValue {
+ inVal = append(inVal, castedValue)
}
- default:
- return fmt.Errorf("unsupported query %s", query)
}
+ inColName = strings.ToLower(c.Name.String())
+ return inColName, inVal, rowCount, nil, nil
+}
- return callback(result)
+func (t *explainTablet) analyzeExpressions(selStmt *sqlparser.Select, tableColumnMap map[sqlparser.IdentifierCS]map[string]querypb.Type) ([]string, []querypb.Type) {
+ colNames := make([]string, 0, 4)
+ colTypes := make([]querypb.Type, 0, 4)
+ for _, node := range selStmt.SelectExprs {
+ switch node := node.(type) {
+ case *sqlparser.AliasedExpr:
+ colNames, colTypes = inferColTypeFromExpr(node.Expr, tableColumnMap, colNames, colTypes)
+ case *sqlparser.StarExpr:
+ if node.TableName.Name.IsEmpty() {
+ // SELECT *
+ for _, colTypeMap := range tableColumnMap {
+ for col, colType := range colTypeMap {
+ colNames = append(colNames, col)
+ colTypes = append(colTypes, colType)
+ }
+ }
+ } else {
+ // SELECT tableName.*
+ colTypeMap := tableColumnMap[node.TableName.Name]
+ for col, colType := range colTypeMap {
+ colNames = append(colNames, col)
+ colTypes = append(colTypes, colType)
+ }
+ }
+ }
+ }
+ return colNames, colTypes
}
func getTables(node sqlparser.SQLNode) []*sqlparser.AliasedTableExpr {
diff --git a/go/vt/vtgate/balancer/balancer.go b/go/vt/vtgate/balancer/balancer.go
new file mode 100644
index 00000000000..462ccfda901
--- /dev/null
+++ b/go/vt/vtgate/balancer/balancer.go
@@ -0,0 +1,369 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package balancer
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "sync"
+
+ "vitess.io/vitess/go/vt/discovery"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+)
+
+/*
+
+The tabletBalancer probabilistically orders the list of available tablets into
+a ranked order of preference in order to satisfy two high-level goals:
+
+1. Balance the load across the available replicas
+2. Prefer a replica in the same cell as the vtgate if possible
+
+In some topologies this is trivial to accomplish by simply preferring tablets in the
+local cell, assuming there are a proportional number of local tablets in each cell to
+satisfy the inbound traffic to the vtgates in that cell.
+
+However, for topologies with a relatively small number of tablets in each cell, a simple
+affinity algorithm does not effectively balance the load.
+
+As a simple example:
+
+ Given three cells with vtgates, four replicas spread into those cells, where each vtgate
+ receives an equal query share. If each routes only to its local cell, the tablets will be
+ unbalanced since two of them receive 1/3 of the queries, but the two replicas in the same
+ cell will only receive 1/6 of the queries.
+
+ Cell A: 1/3 --> vtgate --> 1/3 => vttablet
+
+ Cell B: 1/3 --> vtgate --> 1/3 => vttablet
+
+ Cell C: 1/3 --> vtgate --> 1/6 => vttablet
+ \-> 1/6 => vttablet
+
+Other topologies that can cause similar pathologies include cases where there may be cells
+containing replicas but no local vtgates, and/or cells that have only vtgates but no replicas.
+
+For these topologies, the tabletBalancer proportionally assigns the output flow to each tablet,
+preferring the local cell where possible, but only as long as the global query balance is
+maintained.
+
+To accomplish this goal, the balancer is given:
+
+* The list of cells that receive inbound traffic to vtgates
+* The local cell where the vtgate exists
+* The set of tablets and their cells (learned from discovery)
+
+The model assumes there is an equal probability of a query coming from each vtgate cell, i.e.
+traffic is effectively load balanced between the cells with vtgates.
+
+Given that information, the balancer builds a simple model to determine how much query load
+would go to each tablet if vtgate only routed to its local cell. Then if any tablets are
+unbalanced, it shifts the desired allocation away from the local cell preference in order to
+even out the query load.
+
+Based on this global model, the vtgate then probabilistically picks a destination for each
+query to be sent and uses these weights to order the available tablets accordingly.
+
+Assuming each vtgate is configured with and discovers the same information about the topology,
+and the input flow is balanced across the vtgate cells (as mentioned above), then each vtgate
+should come to the same conclusion about the global flows, and cooperatively should
+converge on the desired balanced query load.
+
+*/
+
+type TabletBalancer interface {
+ // Randomly shuffle the tablets into an order for routing queries
+ ShuffleTablets(target *querypb.Target, tablets []*discovery.TabletHealth)
+
+ // Balancer debug page request
+ DebugHandler(w http.ResponseWriter, r *http.Request)
+}
+
+func NewTabletBalancer(localCell string, vtGateCells []string) TabletBalancer {
+ return &tabletBalancer{
+ localCell: localCell,
+ vtGateCells: vtGateCells,
+ allocations: map[discovery.KeyspaceShardTabletType]*targetAllocation{},
+ }
+}
+
+type tabletBalancer struct {
+ //
+ // Configuration
+ //
+
+ // The local cell for the vtgate
+ localCell string
+
+ // The set of cells that have vtgates
+ vtGateCells []string
+
+ // mu protects the allocation map
+ mu sync.Mutex
+
+ //
+ // Allocations for balanced mode, calculated once per target and invalidated
+ // whenever the topology changes.
+ //
+ allocations map[discovery.KeyspaceShardTabletType]*targetAllocation
+}
+
+type targetAllocation struct {
+ // Target flow per cell based on the number of tablets discovered in the cell
+ Target map[string]int // json:target
+
+ // Input flows allocated for each cell
+ Inflows map[string]int
+
+ // Output flows from each vtgate cell to each target cell
+ Outflows map[string]map[string]int
+
+ // Allocation routed to each tablet from the local cell used for ranking
+ Allocation map[uint32]int
+
+ // Tablets that local cell does not route to
+ Unallocated map[uint32]struct{}
+
+ // Total allocation which is basically 1,000,000 / len(vtgatecells)
+ TotalAllocation int
+}
+
+func (b *tabletBalancer) print() string {
+ allocations, _ := json.Marshal(&b.allocations)
+ return fmt.Sprintf("LocalCell: %s, VtGateCells: %s, allocations: %s",
+ b.localCell, b.vtGateCells, string(allocations))
+}
+
+func (b *tabletBalancer) DebugHandler(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ fmt.Fprintf(w, "Local Cell: %v\r\n", b.localCell)
+ fmt.Fprintf(w, "Vtgate Cells: %v\r\n", b.vtGateCells)
+
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ allocations, _ := json.MarshalIndent(b.allocations, "", " ")
+ fmt.Fprintf(w, "Allocations: %v\r\n", string(allocations))
+}
+
+// ShuffleTablets is the main entry point to the balancer.
+//
+// It shuffles the tablets into a preference list for routing a given query.
+// However, since all tablets should be healthy, the query will almost always go
+// to the first tablet in the list, so the balancer ranking algorithm randomly
+// shuffles the list to break ties, then chooses a weighted random selection
+// based on the balance algorithm to promote to the first in the set.
+func (b *tabletBalancer) ShuffleTablets(target *querypb.Target, tablets []*discovery.TabletHealth) {
+
+ numTablets := len(tablets)
+
+ allocationMap, totalAllocation := b.getAllocation(target, tablets)
+
+ rand.Shuffle(numTablets, func(i, j int) { tablets[i], tablets[j] = tablets[j], tablets[i] })
+
+ // Do another O(n) seek through the list to effect the weighted sample by picking
+ // a random point in the allocation space and seeking forward in the list of (randomized)
+ // tablets to that point, promoting the tablet to the head of the list.
+ r := rand.Intn(totalAllocation)
+ for i := 0; i < numTablets; i++ {
+ flow := allocationMap[tablets[i].Tablet.Alias.Uid]
+ if r < flow {
+ tablets[0], tablets[i] = tablets[i], tablets[0]
+ break
+ }
+ r -= flow
+ }
+}
+
+// To stick with integer arithmetic, use 1,000,000 as the full load
+const ALLOCATION = 1000000
+
+func (b *tabletBalancer) allocateFlows(allTablets []*discovery.TabletHealth) *targetAllocation {
+ // Initialization: Set up some data structures and derived values
+ a := targetAllocation{
+ Target: map[string]int{},
+ Inflows: map[string]int{},
+ Outflows: map[string]map[string]int{},
+ Allocation: map[uint32]int{},
+ Unallocated: map[uint32]struct{}{},
+ }
+ flowPerVtgateCell := ALLOCATION / len(b.vtGateCells)
+ flowPerTablet := ALLOCATION / len(allTablets)
+ cellExistsWithNoTablets := false
+
+ for _, th := range allTablets {
+ a.Target[th.Tablet.Alias.Cell] += flowPerTablet
+ }
+
+ //
+ // First pass: Allocate vtgate flow to the local cell where the vtgate exists
+ // and along the way figure out if there are any vtgates with no local tablets.
+ //
+ for _, cell := range b.vtGateCells {
+ outflow := map[string]int{}
+ target := a.Target[cell]
+
+ if target > 0 {
+ a.Inflows[cell] += flowPerVtgateCell
+ outflow[cell] = flowPerVtgateCell
+ } else {
+ cellExistsWithNoTablets = true
+ }
+
+ a.Outflows[cell] = outflow
+ }
+
+ //
+ // Figure out if there is a shortfall
+ //
+ underAllocated := make(map[string]int)
+ unbalancedFlow := 0
+ for cell, allocation := range a.Target {
+ if a.Inflows[cell] < allocation {
+ underAllocated[cell] = allocation - a.Inflows[cell]
+ unbalancedFlow += underAllocated[cell]
+ }
+ }
+
+ //
+ // Second pass: if there are any vtgates with no local tablets, allocate the underallocated amount
+ // proportionally to all cells that may need it
+ //
+ if cellExistsWithNoTablets {
+ for _, vtgateCell := range b.vtGateCells {
+ target := a.Target[vtgateCell]
+ if target != 0 {
+ continue
+ }
+
+ for underAllocatedCell, underAllocatedFlow := range underAllocated {
+ allocation := flowPerVtgateCell * underAllocatedFlow / unbalancedFlow
+ a.Inflows[underAllocatedCell] += allocation
+ a.Outflows[vtgateCell][underAllocatedCell] += allocation
+ }
+ }
+
+ // Recompute underallocated after these flows were assigned
+ unbalancedFlow = 0
+ underAllocated = make(map[string]int)
+ for cell, allocation := range a.Target {
+ if a.Inflows[cell] < allocation {
+ underAllocated[cell] = allocation - a.Inflows[cell]
+ unbalancedFlow += underAllocated[cell]
+ }
+ }
+ }
+
+ //
+ // Third pass: Shift remaining imbalance if any cell is over/under allocated after
+ // assigning local cell traffic and distributing load from cells without tablets.
+ //
+ if /* fudge for integer arithmetic */ unbalancedFlow > 10 {
+
+ // cells which are overallocated
+ overAllocated := make(map[string]int)
+ for cell, allocation := range a.Target {
+ if a.Inflows[cell] > allocation {
+ overAllocated[cell] = a.Inflows[cell] - allocation
+ }
+ }
+
+ // fmt.Printf("outflows %v over %v under %v\n", a.Outflows, overAllocated, underAllocated)
+
+ //
+ // For each overallocated cell, proportionally shift flow from targets that are overallocated
+ // to targets that are underallocated.
+ //
+ // Note this is an O(N^3) loop, but only over the cells which need adjustment.
+ //
+ for _, vtgateCell := range b.vtGateCells {
+ for underAllocatedCell, underAllocatedFlow := range underAllocated {
+ for overAllocatedCell, overAllocatedFlow := range overAllocated {
+
+ currentFlow := a.Outflows[vtgateCell][overAllocatedCell]
+ if currentFlow == 0 {
+ continue
+ }
+
+ // Shift a proportional fraction of the amount that the cell is currently allocated weighted
+ // by the fraction that this vtgate cell is already sending to the overallocated cell, and the
+ // fraction that the new target is underallocated
+ //
+ // Note that the operator order matters -- multiplications need to occur before divisions
+ // to avoid truncating the integer values.
+ shiftFlow := overAllocatedFlow * currentFlow * underAllocatedFlow / a.Inflows[overAllocatedCell] / unbalancedFlow
+
+ //fmt.Printf("shift %d %s %s -> %s (over %d current %d in %d under %d unbalanced %d) \n", shiftFlow, vtgateCell, overAllocatedCell, underAllocatedCell,
+ // overAllocatedFlow, currentFlow, a.Inflows[overAllocatedCell], underAllocatedFlow, unbalancedFlow)
+
+ a.Outflows[vtgateCell][overAllocatedCell] -= shiftFlow
+ a.Inflows[overAllocatedCell] -= shiftFlow
+
+ a.Inflows[underAllocatedCell] += shiftFlow
+ a.Outflows[vtgateCell][underAllocatedCell] += shiftFlow
+ }
+ }
+ }
+ }
+
+ //
+ // Finally, once the cell flows are all adjusted, figure out the local allocation to each
+ // tablet in the target cells
+ //
+ outflow := a.Outflows[b.localCell]
+ for _, tablet := range allTablets {
+ cell := tablet.Tablet.Alias.Cell
+ flow := outflow[cell]
+ if flow > 0 {
+ a.Allocation[tablet.Tablet.Alias.Uid] = flow * flowPerTablet / a.Target[cell]
+ a.TotalAllocation += flow * flowPerTablet / a.Target[cell]
+ } else {
+ a.Unallocated[tablet.Tablet.Alias.Uid] = struct{}{}
+ }
+ }
+
+ return &a
+}
+
+// getAllocation builds the allocation map if needed and returns a copy of the map
+func (b *tabletBalancer) getAllocation(target *querypb.Target, tablets []*discovery.TabletHealth) (map[uint32]int, int) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ allocation, exists := b.allocations[discovery.KeyFromTarget(target)]
+ if exists && (len(allocation.Allocation)+len(allocation.Unallocated)) == len(tablets) {
+ mismatch := false
+ for _, tablet := range tablets {
+ if _, ok := allocation.Allocation[tablet.Tablet.Alias.Uid]; !ok {
+ if _, ok := allocation.Unallocated[tablet.Tablet.Alias.Uid]; !ok {
+ mismatch = true
+ break
+ }
+ }
+ }
+ if !mismatch {
+ // No change in tablets for this target. Return computed allocation
+ return allocation.Allocation, allocation.TotalAllocation
+ }
+ }
+
+ allocation = b.allocateFlows(tablets)
+ b.allocations[discovery.KeyFromTarget(target)] = allocation
+
+ return allocation.Allocation, allocation.TotalAllocation
+}
diff --git a/go/vt/vtgate/balancer/balancer_test.go b/go/vt/vtgate/balancer/balancer_test.go
new file mode 100644
index 00000000000..1eb9e69fadf
--- /dev/null
+++ b/go/vt/vtgate/balancer/balancer_test.go
@@ -0,0 +1,371 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package balancer
+
+import (
+ "strconv"
+ "testing"
+
+ "vitess.io/vitess/go/vt/discovery"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/topo"
+)
+
+var nextTestTabletUID int
+
+func createTestTablet(cell string) *discovery.TabletHealth {
+ nextTestTabletUID++
+ tablet := topo.NewTablet(uint32(nextTestTabletUID), cell, strconv.Itoa(nextTestTabletUID))
+ tablet.PortMap["vt"] = 1
+ tablet.PortMap["grpc"] = 2
+ tablet.Keyspace = "k"
+ tablet.Shard = "s"
+
+ return &discovery.TabletHealth{
+ Tablet: tablet,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
+ Serving: false,
+ Stats: nil,
+ PrimaryTermStartTime: 0,
+ }
+}
+
+// allow 2% fuzz
+const FUZZ = 2
+
+func fuzzyEquals(a, b int) bool {
+ diff := a - b
+ if diff < 0 {
+ diff = -diff
+ }
+ return diff < a*FUZZ/100
+}
+
+func TestAllocateFlows(t *testing.T) {
+ cases := []struct {
+ test string
+ tablets []*discovery.TabletHealth
+ vtgateCells []string
+ }{
+ {
+ "balanced one tablet per cell",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ },
+ []string{"a", "b", "c", "d"},
+ },
+ {
+ "balanced multiple tablets per cell",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ },
+ []string{"a", "b", "c", "d"},
+ },
+ {
+ "vtgate in cell with no tablets",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ },
+ []string{"a", "b", "c", "d", "e"},
+ },
+ {
+ "vtgates in multiple cells with no tablets",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ },
+ []string{"a", "b", "c", "d", "e", "f", "g"},
+ },
+ {
+ "imbalanced multiple tablets in one cell",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ },
+ []string{"a", "b", "c"},
+ },
+ {
+ "imbalanced multiple tablets in multiple cells",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ createTestTablet("d"),
+ createTestTablet("d"),
+ createTestTablet("d"),
+ },
+ []string{"a", "b", "c", "d"},
+ },
+ {
+ "heavy imbalance",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("c"),
+ },
+ []string{"a", "b", "c", "d"},
+ },
+ }
+
+ target := &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}
+
+ for _, c := range cases {
+ t.Logf("\n\nTest Case: %s\n\n", c.test)
+
+ tablets := c.tablets
+ vtGateCells := c.vtgateCells
+
+ tabletsByCell := make(map[string][]*discovery.TabletHealth)
+ for _, tablet := range tablets {
+ cell := tablet.Tablet.Alias.Cell
+ tabletsByCell[cell] = append(tabletsByCell[cell], tablet)
+ }
+
+ allocationPerTablet := make(map[uint32]int)
+ expectedPerTablet := ALLOCATION / len(tablets)
+
+ expectedPerCell := make(map[string]int)
+ for cell := range tabletsByCell {
+ expectedPerCell[cell] = ALLOCATION / len(tablets) * len(tabletsByCell[cell])
+ }
+
+ // Run the balancer over each vtgate cell
+ for _, localCell := range vtGateCells {
+ b := NewTabletBalancer(localCell, vtGateCells).(*tabletBalancer)
+ a := b.allocateFlows(tablets)
+ b.allocations[discovery.KeyFromTarget(target)] = a
+
+ t.Logf("Target Flows %v, Balancer: %s XXX %d %v \n", expectedPerCell, b.print(), len(b.allocations), b.allocations)
+
+ // Accumulate all the output per tablet cell
+ outflowPerCell := make(map[string]int)
+ for _, outflow := range a.Outflows {
+ for tabletCell, flow := range outflow {
+ if flow < 0 {
+ t.Errorf("balancer %v negative outflow", b.print())
+ }
+ outflowPerCell[tabletCell] += flow
+ }
+ }
+
+ // Check in / out flow to each tablet cell
+ for cell := range tabletsByCell {
+ expectedForCell := expectedPerCell[cell]
+
+ if !fuzzyEquals(a.Inflows[cell], expectedForCell) || !fuzzyEquals(outflowPerCell[cell], expectedForCell) {
+ t.Errorf("Balancer {%s} ExpectedPerCell {%v} did not allocate correct flow to cell %s: expected %d, inflow %d outflow %d",
+ b.print(), expectedPerCell, cell, expectedForCell, a.Inflows[cell], outflowPerCell[cell])
+ }
+ }
+
+ // Accumulate the allocations for all runs to compare what the system does as a whole
+ // when routing from all vtgate cells
+ for uid, flow := range a.Allocation {
+ allocationPerTablet[uid] += flow
+ }
+ }
+
+ // Check that the allocations all add up
+ for _, tablet := range tablets {
+ uid := tablet.Tablet.Alias.Uid
+
+ allocation := allocationPerTablet[uid]
+ if !fuzzyEquals(allocation, expectedPerTablet) {
+ t.Errorf("did not allocate full allocation to tablet %d: expected %d got %d",
+ uid, expectedPerTablet, allocation)
+ }
+ }
+ }
+}
+
+func TestBalancedShuffle(t *testing.T) {
+ cases := []struct {
+ test string
+ tablets []*discovery.TabletHealth
+ vtgateCells []string
+ }{
+ {
+ "simple balanced",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ },
+
+ []string{"a", "b", "c", "d"},
+ },
+ {
+ "simple unbalanced",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("d"),
+ },
+
+ []string{"a", "b", "c", "d"},
+ },
+ {
+ "mixed unbalanced",
+ []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("c"),
+ createTestTablet("c"),
+ },
+
+ []string{"a", "b", "c", "d"},
+ },
+ }
+
+ target := &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}
+ for _, c := range cases {
+ t.Logf("\n\nTest Case: %s\n\n", c.test)
+
+ tablets := c.tablets
+ vtGateCells := c.vtgateCells
+
+ // test unbalanced distribution
+
+ routed := make(map[uint32]int)
+
+ expectedPerCell := make(map[string]int)
+ for _, tablet := range tablets {
+ cell := tablet.Tablet.Alias.Cell
+ expectedPerCell[cell] += ALLOCATION / len(tablets)
+ }
+
+ // Run the algorithm a bunch of times to get a random enough sample
+ N := 1000000
+ for _, localCell := range vtGateCells {
+ b := NewTabletBalancer(localCell, vtGateCells).(*tabletBalancer)
+
+ for i := 0; i < N/len(vtGateCells); i++ {
+ b.ShuffleTablets(target, tablets)
+ if i == 0 {
+ t.Logf("Target Flows %v, Balancer: %s\n", expectedPerCell, b.print())
+ t.Logf(b.print())
+ }
+
+ routed[tablets[0].Tablet.Alias.Uid]++
+ }
+ }
+
+ expected := N / len(tablets)
+ delta := make(map[uint32]int)
+ for _, tablet := range tablets {
+ got := routed[tablet.Tablet.Alias.Uid]
+ delta[tablet.Tablet.Alias.Uid] = got - expected
+ if !fuzzyEquals(got, expected) {
+ t.Errorf("routing to tablet %d got %d expected %d", tablet.Tablet.Alias.Uid, got, expected)
+ }
+ }
+ t.Logf("Expected %d per tablet, Routed %v, Delta %v, Max delta %d", N/len(tablets), routed, delta, expected*FUZZ/100)
+ }
+}
+
+func TestTopologyChanged(t *testing.T) {
+ allTablets := []*discovery.TabletHealth{
+ createTestTablet("a"),
+ createTestTablet("a"),
+ createTestTablet("b"),
+ createTestTablet("b"),
+ }
+ target := &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}
+
+ b := NewTabletBalancer("b", []string{"a", "b"}).(*tabletBalancer)
+
+ N := 1
+
+ // initially create a slice of tablets with just the two in cell a
+ tablets := allTablets
+ tablets = tablets[0:2]
+
+ for i := 0; i < N; i++ {
+ b.ShuffleTablets(target, tablets)
+ allocation, totalAllocation := b.getAllocation(target, tablets)
+
+ if totalAllocation != ALLOCATION/2 {
+ t.Errorf("totalAllocation mismatch %s", b.print())
+ }
+
+ if allocation[allTablets[0].Tablet.Alias.Uid] != ALLOCATION/4 {
+ t.Errorf("allocation mismatch %s, cell %s", b.print(), allTablets[0].Tablet.Alias.Cell)
+ }
+
+ if tablets[0].Tablet.Alias.Cell != "a" {
+ t.Errorf("shuffle promoted wrong tablet from cell %s", tablets[0].Tablet.Alias.Cell)
+ }
+ }
+
+ // Run again with the full topology. Now traffic should go to cell b
+ for i := 0; i < N; i++ {
+ b.ShuffleTablets(target, allTablets)
+
+ allocation, totalAllocation := b.getAllocation(target, allTablets)
+
+ if totalAllocation != ALLOCATION/2 {
+ t.Errorf("totalAllocation mismatch %s", b.print())
+ }
+
+ if allocation[allTablets[0].Tablet.Alias.Uid] != ALLOCATION/4 {
+ t.Errorf("allocation mismatch %s, cell %s", b.print(), allTablets[0].Tablet.Alias.Cell)
+ }
+
+ if allTablets[0].Tablet.Alias.Cell != "b" {
+ t.Errorf("shuffle promoted wrong tablet from cell %s", allTablets[0].Tablet.Alias.Cell)
+ }
+ }
+}
diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go
index b1f8c4538ec..c8ad1c9343f 100644
--- a/go/vt/vtgate/buffer/shard_buffer.go
+++ b/go/vt/vtgate/buffer/shard_buffer.go
@@ -480,7 +480,9 @@ func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillS
sb.mu.Lock()
defer sb.mu.Unlock()
- log.Infof("disruption in shard %s/%s resolved (serving: %v)", sb.keyspace, sb.shard, stillServing)
+ // Disabled due to a log storm in production; see the discussion thread:
+ // https://slack-pde.slack.com/archives/C06CPL4HMED/p1729896804879749
+ // log.Infof("disruption in shard %s/%s resolved (serving: %v)", sb.keyspace, sb.shard, stillServing)
if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) {
if sb.currentPrimary != nil {
diff --git a/go/vt/vtgate/endtoend/main_test.go b/go/vt/vtgate/endtoend/main_test.go
index 046af36a3dd..28ea5d0f7ec 100644
--- a/go/vt/vtgate/endtoend/main_test.go
+++ b/go/vt/vtgate/endtoend/main_test.go
@@ -46,6 +46,24 @@ create table t1(
primary key(id1)
) Engine=InnoDB;
+create table t1_copy_basic(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
+
+create table t1_copy_all(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
+
+create table t1_copy_resume(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
+
create table t1_id2_idx(
id2 bigint,
keyspace_id varbinary(10),
@@ -134,6 +152,24 @@ create table t1_sharded(
Name: "t1_id2_vdx",
}},
},
+ "t1_copy_basic": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "id1",
+ Name: "hash",
+ }},
+ },
+ "t1_copy_all": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "id1",
+ Name: "hash",
+ }},
+ },
+ "t1_copy_resume": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "id1",
+ Name: "hash",
+ }},
+ },
"t1_sharded": {
ColumnVindexes: []*vschemapb.ColumnVindex{{
Column: "id1",
@@ -195,6 +231,31 @@ create table t1_sharded(
},
},
}
+
+ schema2 = `
+create table t1_copy_all_ks2(
+ id1 bigint,
+ id2 bigint,
+ primary key(id1)
+) Engine=InnoDB;
+`
+
+ vschema2 = &vschemapb.Keyspace{
+ Sharded: true,
+ Vindexes: map[string]*vschemapb.Vindex{
+ "hash": {
+ Type: "hash",
+ },
+ },
+ Tables: map[string]*vschemapb.Table{
+ "t1_copy_all_ks2": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "id1",
+ Name: "hash",
+ }},
+ },
+ },
+ }
)
func TestMain(m *testing.M) {
@@ -203,14 +264,24 @@ func TestMain(m *testing.M) {
exitCode := func() int {
var cfg vttest.Config
cfg.Topology = &vttestpb.VTTestTopology{
- Keyspaces: []*vttestpb.Keyspace{{
- Name: "ks",
- Shards: []*vttestpb.Shard{{
- Name: "-80",
- }, {
- Name: "80-",
- }},
- }},
+ Keyspaces: []*vttestpb.Keyspace{
+ {
+ Name: "ks",
+ Shards: []*vttestpb.Shard{{
+ Name: "-80",
+ }, {
+ Name: "80-",
+ }},
+ },
+ {
+ Name: "ks2",
+ Shards: []*vttestpb.Shard{{
+ Name: "-80",
+ }, {
+ Name: "80-",
+ }},
+ },
+ },
}
if err := cfg.InitSchemas("ks", schema, vschema); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
@@ -218,6 +289,11 @@ func TestMain(m *testing.M) {
return 1
}
defer os.RemoveAll(cfg.SchemaDir)
+ if err := cfg.InitSchemas("ks2", schema2, vschema2); err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ os.RemoveAll(cfg.SchemaDir)
+ return 1
+ }
cfg.TabletHostName = *tabletHostName
diff --git a/go/vt/vtgate/endtoend/misc_test.go b/go/vt/vtgate/endtoend/misc_test.go
index 138b68d0aa3..aeeb1c122db 100644
--- a/go/vt/vtgate/endtoend/misc_test.go
+++ b/go/vt/vtgate/endtoend/misc_test.go
@@ -19,6 +19,7 @@ package endtoend
import (
"context"
"fmt"
+ osExec "os/exec"
"testing"
"github.com/stretchr/testify/assert"
@@ -55,6 +56,16 @@ func TestCreateAndDropDatabase(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
+ // cleanup the keyspace from the topology.
+ defer func() {
+ // The corresponding database needs to be created in advance; without it,
+ // the subsequent DeleteKeyspace command fails with a 'node doesn't exist' error.
+ _ = exec(t, conn, "create database testitest")
+
+ _, err := osExec.Command("vtctldclient", "--server", grpcAddress, "DeleteKeyspace", "--recursive", "--force", "testitest").CombinedOutput()
+ require.NoError(t, err)
+ }()
+
// run it 3 times.
for count := 0; count < 3; count++ {
t.Run(fmt.Sprintf("exec:%d", count), func(t *testing.T) {
diff --git a/go/vt/vtgate/endtoend/row_count_test.go b/go/vt/vtgate/endtoend/row_count_test.go
index 9ac200b33fa..5a29f6177a9 100644
--- a/go/vt/vtgate/endtoend/row_count_test.go
+++ b/go/vt/vtgate/endtoend/row_count_test.go
@@ -24,6 +24,7 @@ import (
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/test/endtoend/utils"
)
func TestRowCount(t *testing.T) {
@@ -31,6 +32,7 @@ func TestRowCount(t *testing.T) {
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
defer conn.Close()
+ utils.Exec(t, conn, "use ks")
type tc struct {
query string
expected int
diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go
index 477bb2518b5..16eea4c91f2 100644
--- a/go/vt/vtgate/endtoend/vstream_test.go
+++ b/go/vt/vtgate/endtoend/vstream_test.go
@@ -20,6 +20,8 @@ import (
"context"
"fmt"
"io"
+ "regexp"
+ "sort"
"sync"
"testing"
@@ -168,7 +170,7 @@ func TestVStreamCopyBasic(t *testing.T) {
gconn, conn, mconn, closeConnections := initialize(ctx, t)
defer closeConnections()
- _, err := conn.ExecuteFetch("insert into t1(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
+ _, err := conn.ExecuteFetch("insert into t1_copy_basic(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
if err != nil {
t.Fatal(err)
}
@@ -179,7 +181,7 @@ func TestVStreamCopyBasic(t *testing.T) {
}
qr := sqltypes.ResultToProto3(&lastPK)
tablePKs := []*binlogdatapb.TableLastPK{{
- TableName: "t1",
+ TableName: "t1_copy_basic",
Lastpk: qr,
}}
var shardGtids []*binlogdatapb.ShardGtid
@@ -199,8 +201,8 @@ func TestVStreamCopyBasic(t *testing.T) {
vgtid.ShardGtids = shardGtids
filter := &binlogdatapb.Filter{
Rules: []*binlogdatapb.Rule{{
- Match: "t1",
- Filter: "select * from t1",
+ Match: "t1_copy_basic",
+ Filter: "select * from t1_copy_basic",
}},
}
flags := &vtgatepb.VStreamFlags{}
@@ -209,19 +211,291 @@ func TestVStreamCopyBasic(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- numExpectedEvents := 2 /* num shards */ * (7 /* begin/field/vgtid:pos/2 rowevents avg/vgitd: lastpk/commit) */ + 3 /* begin/vgtid/commit for completed table */)
+ numExpectedEvents := 2 /* num shards */ *(7 /* begin/field/vgtid:pos/2 rowevents avg/vgitd: lastpk/commit) */ +3 /* begin/vgtid/commit for completed table */ +1 /* copy operation completed */) + 1 /* fully copy operation completed */
+ expectedCompletedEvents := []string{
+ `type:COPY_COMPLETED keyspace:"ks" shard:"-80"`,
+ `type:COPY_COMPLETED keyspace:"ks" shard:"80-"`,
+ `type:COPY_COMPLETED`,
+ }
require.NotNil(t, reader)
var evs []*binlogdatapb.VEvent
+ var completedEvs []*binlogdatapb.VEvent
for {
e, err := reader.Recv()
switch err {
case nil:
evs = append(evs, e...)
+
+ for _, ev := range e {
+ if ev.Type == binlogdatapb.VEventType_COPY_COMPLETED {
+ completedEvs = append(completedEvs, ev)
+ }
+ }
+
+ printEvents(evs) // for debugging ci failures
+
if len(evs) == numExpectedEvents {
+ sortCopyCompletedEvents(completedEvs)
+ for i, ev := range completedEvs {
+ require.Regexp(t, expectedCompletedEvents[i], ev.String())
+ }
t.Logf("TestVStreamCopyBasic was successful")
return
+ } else if numExpectedEvents < len(evs) {
+ t.Fatalf("len(events)=%v are not expected\n", len(evs))
+ }
+ case io.EOF:
+ log.Infof("stream ended\n")
+ cancel()
+ default:
+ log.Errorf("Returned err %v", err)
+ t.Fatalf("remote error: %v\n", err)
+ }
+ }
+}
+
+// TestVStreamCopyUnspecifiedShardGtid tests the case where the keyspace contains wildcards and/or the shard is not specified in the request.
+// Verify that the Vstream API resolves the unspecified ShardGtid input to a list of all the matching keyspaces and all the shards in the topology.
+// - If the keyspace contains wildcards and the shard is not specified, the copy operation should be performed on all shards of all matching keyspaces.
+// - If the keyspace is specified and the shard is not specified, the copy operation should be performed on all shards of the specified keyspace.
+func TestVStreamCopyUnspecifiedShardGtid(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ conn, err := mysql.Connect(ctx, &vtParams)
+ if err != nil {
+ // NOTE(review): require.NoError inside an explicit nil-check is
+ // redundant but harmless; kept as-is.
+ require.NoError(t, err)
+ }
+ defer conn.Close()
+
+ // Seed rows in both keyspaces so each copy phase has data to stream.
+ _, err = conn.ExecuteFetch("insert into t1_copy_all(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
+ if err != nil {
+ require.NoError(t, err)
+ }
+
+ _, err = conn.ExecuteFetch("insert into t1_copy_all_ks2(id1,id2) values(10,10), (20,20)", 1, false)
+ if err != nil {
+ require.NoError(t, err)
+ }
+
+ // Regexp match picks up t1_copy_all (ks) and t1_copy_all_ks2 (ks2).
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "/t1_copy_all.*/",
+ }},
+ }
+ flags := &vtgatepb.VStreamFlags{}
+
+ // We have 2 shards in each keyspace. We assume the rows are
+ // evenly split across each shard. For each INSERT statement, which
+ // is a transaction and gets a global transaction identifier or GTID, we
+ // have 1 each of the following events:
+ // begin, field, position, lastpk, commit (5)
+ // For each row created in the INSERT statement -- 8 on ks1 and
+ // 2 on ks2 -- we have 1 row event between the begin and commit.
+ // When we have copied all rows for a table in the shard, the shard
+ // also gets events marking the transition from the copy phase to
+ // the streaming phase for that table with 1 each of the following:
+ // begin, vgtid, commit (3)
+ // As the copy phase completes for all tables on the shard, the shard
+ // gets 1 copy phase completed event.
+ // Lastly the stream has 1 final event to mark the final end to all
+ // copy phase operations in the vstream.
+ expectedKs1EventNum := 2 /* num shards */ * (9 /* begin/field/vgtid:pos/4 rowevents avg/vgtid: lastpk/commit) */ + 3 /* begin/vgtid/commit for completed table */ + 1 /* copy operation completed */)
+ expectedKs2EventNum := 2 /* num shards */ * (6 /* begin/field/vgtid:pos/1 rowevents avg/vgtid: lastpk/commit) */ + 3 /* begin/vgtid/commit for completed table */ + 1 /* copy operation completed */)
+ expectedFullyCopyCompletedNum := 1
+
+ cases := []struct {
+ name string
+ shardGtid *binlogdatapb.ShardGtid
+ expectedEventNum int
+ expectedCompletedEvents []string
+ }{
+ {
+ // Keyspace regexp with no shard: resolves to every shard of every
+ // matching keyspace.
+ name: "copy from all keyspaces",
+ shardGtid: &binlogdatapb.ShardGtid{
+ Keyspace: "/.*",
+ },
+ expectedEventNum: expectedKs1EventNum + expectedKs2EventNum + expectedFullyCopyCompletedNum,
+ expectedCompletedEvents: []string{
+ `type:COPY_COMPLETED keyspace:"ks" shard:"-80"`,
+ `type:COPY_COMPLETED keyspace:"ks" shard:"80-"`,
+ `type:COPY_COMPLETED keyspace:"ks2" shard:"-80"`,
+ `type:COPY_COMPLETED keyspace:"ks2" shard:"80-"`,
+ `type:COPY_COMPLETED`,
+ },
+ },
+ {
+ // Explicit keyspace with no shard: resolves to all shards of that
+ // keyspace only.
+ name: "copy from all shards in one keyspace",
+ shardGtid: &binlogdatapb.ShardGtid{
+ Keyspace: "ks",
+ },
+ expectedEventNum: expectedKs1EventNum + expectedFullyCopyCompletedNum,
+ expectedCompletedEvents: []string{
+ `type:COPY_COMPLETED keyspace:"ks" shard:"-80"`,
+ `type:COPY_COMPLETED keyspace:"ks" shard:"80-"`,
+ `type:COPY_COMPLETED`,
+ },
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ gconn, conn, mconn, closeConnections := initialize(ctx, t)
+ defer closeConnections()
+
+ var vgtid = &binlogdatapb.VGtid{}
+ vgtid.ShardGtids = []*binlogdatapb.ShardGtid{c.shardGtid}
+ reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags)
+ // conn and mconn are unused in this subtest; keep the blank
+ // assignment so initialize's return values are consumed.
+ _, _ = conn, mconn
+ if err != nil {
+ require.NoError(t, err)
+ }
+ require.NotNil(t, reader)
+ var evs []*binlogdatapb.VEvent
+ var completedEvs []*binlogdatapb.VEvent
+ for {
+ e, err := reader.Recv()
+ switch err {
+ case nil:
+ evs = append(evs, e...)
+
+ // Track COPY_COMPLETED events separately so we can verify
+ // their keyspaces/shards once the stream is done.
+ for _, ev := range e {
+ if ev.Type == binlogdatapb.VEventType_COPY_COMPLETED {
+ completedEvs = append(completedEvs, ev)
+ }
+ }
+
+ if len(evs) == c.expectedEventNum {
+ sortCopyCompletedEvents(completedEvs)
+ for i, ev := range completedEvs {
+ require.Equal(t, c.expectedCompletedEvents[i], ev.String())
+ }
+ t.Logf("TestVStreamCopyUnspecifiedShardGtid was successful")
+ return
+ } else if c.expectedEventNum < len(evs) {
+ printEvents(evs) // for debugging ci failures
+ // NOTE(review): FailNow does not format %v verbs -- the
+ // count is passed via msgAndArgs; FailNowf would format it.
+ require.FailNow(t, "len(events)=%v are not expected\n", len(evs))
+ }
+ case io.EOF:
+ log.Infof("stream ended\n")
+ cancel()
+ default:
+ log.Errorf("Returned err %v", err)
+ require.FailNow(t, "remote error: %v\n", err)
+ }
+ }
+ })
+ }
+}
+
+func TestVStreamCopyResume(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ gconn, conn, mconn, closeConnections := initialize(ctx, t)
+ defer closeConnections()
+
+ _, err := conn.ExecuteFetch("insert into t1_copy_resume(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Any subsequent GTIDs will be part of the stream
+ mpos, err := mconn.PrimaryPosition()
+ require.NoError(t, err)
+
+ // lastPK is id1=4, meaning we should only copy rows for id1 IN(5,6,7,8,9)
+ lastPK := sqltypes.Result{
+ Fields: []*query.Field{{Name: "id1", Type: query.Type_INT64}},
+ Rows: [][]sqltypes.Value{{sqltypes.NewInt64(4)}},
+ }
+ tableLastPK := []*binlogdatapb.TableLastPK{{
+ TableName: "t1_copy_resume",
+ Lastpk: sqltypes.ResultToProto3(&lastPK),
+ }}
+
+ catchupQueries := []string{
+ "insert into t1_copy_resume(id1,id2) values(9,9)", // this row will show up twice: once in catchup and copy
+ "update t1_copy_resume set id2 = 10 where id1 = 1",
+ "insert into t1(id1, id2) values(100,100)",
+ "delete from t1_copy_resume where id1 = 1",
+ "update t1_copy_resume set id2 = 90 where id1 = 9",
+ }
+ for _, query := range catchupQueries {
+ _, err = conn.ExecuteFetch(query, 1, false)
+ require.NoError(t, err)
+ }
+
+ var shardGtids []*binlogdatapb.ShardGtid
+ var vgtid = &binlogdatapb.VGtid{}
+ shardGtids = append(shardGtids, &binlogdatapb.ShardGtid{
+ Keyspace: "ks",
+ Shard: "-80",
+ Gtid: fmt.Sprintf("%s/%s", mpos.GTIDSet.Flavor(), mpos),
+ TablePKs: tableLastPK,
+ })
+ shardGtids = append(shardGtids, &binlogdatapb.ShardGtid{
+ Keyspace: "ks",
+ Shard: "80-",
+ Gtid: fmt.Sprintf("%s/%s", mpos.GTIDSet.Flavor(), mpos),
+ TablePKs: tableLastPK,
+ })
+ vgtid.ShardGtids = shardGtids
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1_copy_resume",
+ Filter: "select * from t1_copy_resume",
+ }},
+ }
+ flags := &vtgatepb.VStreamFlags{}
+ reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags)
+ if err != nil {
+ t.Fatal(err)
+ }
+ require.NotNil(t, reader)
+
+ expectedRowCopyEvents := 5 // id1 and id2 IN(5,6,7,8,9)
+ expectedCatchupEvents := len(catchupQueries) - 1 // insert into t1 should never reach
+ rowCopyEvents, replCatchupEvents := 0, 0
+ expectedEvents := []string{
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:1 values:"11"} after:{lengths:1 lengths:2 values:"110"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:2 values:"110"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"55"}} keyspace:"ks" shard:"-80"} keyspace:"ks" shard:"-80"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"66"}} keyspace:"ks" shard:"80-"} keyspace:"ks" shard:"80-"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"77"}} keyspace:"ks" shard:"80-"} keyspace:"ks" shard:"80-"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"88"}} keyspace:"ks" shard:"80-"} keyspace:"ks" shard:"80-"`,
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:1 values:"99"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:2 values:"990"}} keyspace:"ks" shard:"-80"} keyspace:"ks" shard:"-80"`,
+ `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:1 values:"99"} after:{lengths:1 lengths:2 values:"990"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`,
+ }
+ var evs []*binlogdatapb.VEvent
+ for {
+ e, err := reader.Recv()
+ switch err {
+ case nil:
+ for _, ev := range e {
+ if ev.Type == binlogdatapb.VEventType_ROW {
+ evs = append(evs, ev)
+ if ev.Timestamp == 0 {
+ rowCopyEvents++
+ } else {
+ replCatchupEvents++
+ }
+ printEvents(evs) // for debugging ci failures
+ }
+ if ev.Type == binlogdatapb.VEventType_VGTID {
+ // Validate that the vgtid event the client receives from the vstream copy has a complete TableLastPK proto message.
+ // Also, to ensure that the client can resume properly, make sure that
+ // the Fields value is present in the sqltypes.Result field and not missing.
+ require.Regexp(t, `type:VGTID vgtid:{(shard_gtids:{keyspace:"ks" shard:"(80-|-80)" gtid:".+" table_p_ks:{table_name:"t1_copy_resume" lastpk:{fields:{name:"id1" type:INT64} rows:{lengths:1 values:"[0-9]"}}}})+} keyspace:"ks" shard:"(80-|-80)"`, ev.String())
+ }
+ }
+ if expectedCatchupEvents == replCatchupEvents && expectedRowCopyEvents == rowCopyEvents {
+ sort.Sort(VEventSorter(evs))
+ for i, ev := range evs {
+ require.Regexp(t, expectedEvents[i], ev.String())
+ }
+ t.Logf("TestVStreamCopyResume was successful")
+ return
}
- printEvents(evs) // for debugging ci failures
case io.EOF:
log.Infof("stream ended\n")
cancel()
@@ -330,9 +604,9 @@ func TestVStreamSharded(t *testing.T) {
received bool
}
expectedEvents := []*expectedEvent{
- {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768} keyspace:"ks" shard:"-80"}`, false},
+ {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"-80"}`, false},
{`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"11"}} keyspace:"ks" shard:"-80"}`, false},
- {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768} keyspace:"ks" shard:"80-"}`, false},
+ {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"80-"}`, false},
{`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"44"}} keyspace:"ks" shard:"80-"}`, false},
}
for {
@@ -357,7 +631,7 @@ func TestVStreamSharded(t *testing.T) {
for _, ev := range evs {
s := fmt.Sprintf("%v", ev)
for _, expectedEv := range expectedEvents {
- if expectedEv.ev == s {
+ if removeAnyDeprecatedDisplayWidths(expectedEv.ev) == removeAnyDeprecatedDisplayWidths(s) {
expectedEv.received = true
break
}
@@ -381,6 +655,136 @@ func TestVStreamSharded(t *testing.T) {
}
+// TestVStreamCopyTransactions tests that we are properly wrapping
+// ROW events in the stream with BEGIN and COMMIT events.
+func TestVStreamCopyTransactions(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ keyspace := "ks"
+ shards := []string{"-80", "80-"}
+ table := "t1_copy_basic"
+ // Transactional state: a BEGIN must be seen before a COMMIT, and the
+ // pair resets once both have been observed.
+ beginEventSeen, commitEventSeen := false, false
+ numResultInTrx := 0
+ vgtid := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{
+ {
+ Keyspace: keyspace,
+ Shard: shards[0],
+ Gtid: "", // Start a vstream copy
+ },
+ {
+ Keyspace: keyspace,
+ Shard: shards[1],
+ Gtid: "", // Start a vstream copy
+ },
+ },
+ }
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: table,
+ Filter: fmt.Sprintf("select * from %s", table),
+ }},
+ }
+
+ gconn, conn, _, closeConnections := initialize(ctx, t)
+ defer closeConnections()
+
+ // Clear any existing data.
+ q := fmt.Sprintf("delete from %s", table)
+ _, err := conn.ExecuteFetch(q, -1, false)
+ require.NoError(t, err, "error clearing data: %v", err)
+
+ // Generate some test data. Enough to cross the default
+ // vstream_packet_size threshold.
+ for i := 1; i <= 100000; i++ {
+ values := fmt.Sprintf("(%d, %d)", i, i)
+ q := fmt.Sprintf("insert into %s (id1, id2) values %s", table, values)
+ _, err := conn.ExecuteFetch(q, 1, false)
+ require.NoError(t, err, "error inserting data: %v", err)
+ }
+
+ // Start a vstream.
+ reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, nil)
+ require.NoError(t, err, "error starting vstream: %v", err)
+
+recvLoop:
+ for {
+ vevents, err := reader.Recv()
+ numResultInTrx++
+ eventCount := len(vevents)
+ t.Logf("------------------ Received %d events in response #%d for the transaction ------------------\n",
+ eventCount, numResultInTrx)
+ switch err {
+ case nil:
+ for _, event := range vevents {
+ switch event.Type {
+ case binlogdatapb.VEventType_BEGIN:
+ // Exactly one BEGIN per transaction, and never after a
+ // COMMIT that has not yet been paired with a BEGIN.
+ require.False(t, beginEventSeen, "received a second BEGIN event within the transaction: numResultInTrx=%d\n",
+ numResultInTrx)
+ beginEventSeen = true
+ t.Logf("Found BEGIN event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx)
+ require.False(t, commitEventSeen, "received a BEGIN event when expecting a COMMIT event: numResultInTrx=%d\n",
+ numResultInTrx)
+ case binlogdatapb.VEventType_VGTID:
+ t.Logf("Found VGTID event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ case binlogdatapb.VEventType_FIELD:
+ t.Logf("Found FIELD event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ case binlogdatapb.VEventType_ROW:
+ // Uncomment if you need to do more debugging.
+ // t.Logf("Found ROW event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ // beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ case binlogdatapb.VEventType_COMMIT:
+ commitEventSeen = true
+ t.Logf("Found COMMIT event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n",
+ beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event)
+ require.True(t, beginEventSeen, "received COMMIT event before receiving BEGIN event: numResultInTrx=%d\n",
+ numResultInTrx)
+ case binlogdatapb.VEventType_COPY_COMPLETED:
+ // The copy phase is done; stop consuming the stream.
+ t.Logf("Finished vstream copy\n")
+ t.Logf("-------------------------------------------------------------------\n\n")
+ cancel()
+ break recvLoop
+ default:
+ t.Logf("Found extraneous event: %+v\n", event)
+ }
+ if beginEventSeen && commitEventSeen {
+ t.Logf("Received both BEGIN and COMMIT, so resetting transactional state\n")
+ beginEventSeen = false
+ commitEventSeen = false
+ numResultInTrx = 0
+ }
+ }
+ case io.EOF:
+ t.Logf("vstream ended\n")
+ t.Logf("-------------------------------------------------------------------\n\n")
+ cancel()
+ return
+ default:
+ require.FailNowf(t, "unexpected error", "encountered error in vstream: %v", err)
+ return
+ }
+ }
+ // The last response, when the vstream copy completes, does not
+ // typically contain ROW events.
+ if beginEventSeen || commitEventSeen {
+ require.True(t, (beginEventSeen && commitEventSeen), "did not receive both BEGIN and COMMIT events in the final ROW event set")
+ }
+}
+
+// removeAnyDeprecatedDisplayWidths strips deprecated MySQL display widths from
+// integer and year column types -- e.g. "bigint(20)" becomes "bigint" and
+// "year(4)" becomes "year" -- so expected and actual field-event strings
+// compare equal regardless of whether the server emits display widths.
+func removeAnyDeprecatedDisplayWidths(orig string) string {
+ var adjusted string
+ baseIntType := "int"
+ // Case-insensitive; also matches int/tinyint/smallint/bigint suffixes.
+ intRE := regexp.MustCompile(`(?i)int\(([0-9]*)?\)`)
+ adjusted = intRE.ReplaceAllString(orig, baseIntType)
+ baseYearType := "year"
+ yearRE := regexp.MustCompile(`(?i)year\(([0-9]*)?\)`)
+ adjusted = yearRE.ReplaceAllString(adjusted, baseYearType)
+ return adjusted
+}
+
var printMu sync.Mutex
func printEvents(evs []*binlogdatapb.VEvent) {
@@ -396,3 +800,47 @@ func printEvents(evs []*binlogdatapb.VEvent) {
s += "===END===" + "\n"
log.Infof("%s", s)
}
+
+// Sort the VEvents by the first row change's after value bytes primarily, with
+// secondary ordering by timestamp (ASC). Note that row copy events do not have
+// a timestamp and the value will be 0.
+// VEventSorter implements sort.Interface for a slice of row VEvents.
+type VEventSorter []*binlogdatapb.VEvent
+
+// Len returns the number of events.
+func (v VEventSorter) Len() int {
+ return len(v)
+}
+
+// Swap exchanges the events at positions i and j.
+func (v VEventSorter) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
+
+// Less orders events by the first row change's value bytes (After, falling
+// back to Before for deletes), breaking ties by ascending Timestamp; copy-phase
+// rows carry Timestamp 0 and thus sort before catchup rows with equal values.
+// NOTE(review): assumes every event has a RowEvent with at least one RowChange;
+// a non-ROW event here would panic.
+func (v VEventSorter) Less(i, j int) bool {
+ valsI := v[i].GetRowEvent().RowChanges[0].After
+ if valsI == nil {
+ valsI = v[i].GetRowEvent().RowChanges[0].Before
+ }
+ valsJ := v[j].GetRowEvent().RowChanges[0].After
+ if valsJ == nil {
+ valsJ = v[j].GetRowEvent().RowChanges[0].Before
+ }
+ valI := string(valsI.Values)
+ valJ := string(valsJ.Values)
+ if valI == valJ {
+ return v[i].Timestamp < v[j].Timestamp
+ }
+ return valI < valJ
+}
+
+// The arrival order of COPY_COMPLETED events with keyspace/shard is not constant.
+// On the other hand, the last event should always be a fully COPY_COMPLETED event.
+// That's why the sort.Slice doesn't have to handle the last element in completedEvs.
+// sortCopyCompletedEvents sorts all but the final event by keyspace/shard; the
+// final element is left in place because it is always the fully-completed
+// COPY_COMPLETED event (see the comment above).
+// NOTE(review): an empty completedEvs slice would panic here -- callers only
+// invoke this after at least one COPY_COMPLETED event has arrived.
+func sortCopyCompletedEvents(completedEvs []*binlogdatapb.VEvent) {
+ sortVEventByKeyspaceAndShard(completedEvs[:len(completedEvs)-1])
+}
+
+// sortVEventByKeyspaceAndShard orders events lexicographically by keyspace,
+// then by shard, to make arrival-order-independent assertions possible.
+func sortVEventByKeyspaceAndShard(evs []*binlogdatapb.VEvent) {
+ sort.Slice(evs, func(i, j int) bool {
+ if evs[i].Keyspace == evs[j].Keyspace {
+ return evs[i].Shard < evs[j].Shard
+ }
+ return evs[i].Keyspace < evs[j].Keyspace
+ })
+}
diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go
index e11adce3858..7858ccfc938 100644
--- a/go/vt/vtgate/engine/concatenate.go
+++ b/go/vt/vtgate/engine/concatenate.go
@@ -143,12 +143,23 @@ func (c *Concatenate) getFields(res []*sqltypes.Result) ([]*querypb.Field, error
}
func (c *Concatenate) execSources(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) {
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
+ if vcursor.Session().InTransaction() {
+ // as we are in a transaction, we need to execute all queries inside a single transaction
+ // therefore it needs a sequential execution.
+ return c.sequentialExec(ctx, vcursor, bindVars, wantfields)
+ }
+ // not in transaction, so execute in parallel.
+ return c.parallelExec(ctx, vcursor, bindVars, wantfields)
+}
+
+func (c *Concatenate) parallelExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) {
results := make([]*sqltypes.Result, len(c.Sources))
- var wg sync.WaitGroup
var outerErr error
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var wg sync.WaitGroup
for i, source := range c.Sources {
currIndex, currSource := i, source
vars := copyBindVars(bindVars)
@@ -164,14 +175,35 @@ func (c *Concatenate) execSources(ctx context.Context, vcursor VCursor, bindVars
}()
}
wg.Wait()
- if outerErr != nil {
- return nil, outerErr
+ return results, outerErr
+}
+
+// sequentialExec runs each source primitive one after the other, in order,
+// so that all queries execute inside the caller's single transaction.
+// It returns the per-source results, or the first error encountered.
+func (c *Concatenate) sequentialExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) {
+ results := make([]*sqltypes.Result, len(c.Sources))
+ for i, source := range c.Sources {
+ currIndex, currSource := i, source
+ // Copy the bind variables so one source's execution cannot mutate
+ // another's input.
+ vars := copyBindVars(bindVars)
+ result, err := vcursor.ExecutePrimitive(ctx, currSource, vars, wantfields)
+ if err != nil {
+ return nil, err
+ }
+ results[currIndex] = result
+ }
+ return results, nil
+}
// TryStreamExecute performs a streaming exec.
func (c *Concatenate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
+ if vcursor.Session().InTransaction() {
+ // as we are in a transaction, we need to execute all queries inside a single transaction
+ // therefore it needs a sequential execution.
+ return c.sequentialStreamExec(ctx, vcursor, bindVars, wantfields, callback)
+ }
+ // not in transaction, so execute in parallel.
+ return c.parallelStreamExec(ctx, vcursor, bindVars, wantfields, callback)
+}
+
+func (c *Concatenate) parallelStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
var seenFields []*querypb.Field
var outerErr error
@@ -237,6 +269,44 @@ func (c *Concatenate) TryStreamExecute(ctx context.Context, vcursor VCursor, bin
return outerErr
}
+// sequentialStreamExec streams each source one after the other, in order, so
+// all queries run inside the caller's single transaction. The first source's
+// first chunk establishes the field set; subsequent chunks that carry fields
+// are validated against it before being forwarded to the callback.
+// NOTE(review): fieldsMu appears unnecessary here since sources execute
+// strictly sequentially -- likely carried over from the parallel variant.
+func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
+ // all the below fields ensure that the fields are sent only once.
+ var seenFields []*querypb.Field
+ var fieldsMu sync.Mutex
+ var fieldsSent bool
+
+ for idx, source := range c.Sources {
+ err := vcursor.StreamExecutePrimitive(ctx, source, bindVars, wantfields, func(resultChunk *sqltypes.Result) error {
+ // if we have fields to compare, make sure all the fields are all the same
+ if idx == 0 {
+ fieldsMu.Lock()
+ defer fieldsMu.Unlock()
+ if !fieldsSent {
+ // First chunk of the first source: record its fields as
+ // the canonical set and forward it untouched.
+ fieldsSent = true
+ seenFields = resultChunk.Fields
+ return callback(resultChunk)
+ }
+ }
+ if resultChunk.Fields != nil {
+ err := c.compareFields(seenFields, resultChunk.Fields)
+ if err != nil {
+ return err
+ }
+ }
+ // check if context has expired.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ return callback(resultChunk)
+
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// GetFields fetches the field info.
func (c *Concatenate) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
// TODO: type coercions
diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go
index e5f4c4dd74a..90cb611bbea 100644
--- a/go/vt/vtgate/engine/fake_vcursor_test.go
+++ b/go/vt/vtgate/engine/fake_vcursor_test.go
@@ -50,6 +50,10 @@ var _ SessionActions = (*noopVCursor)(nil)
type noopVCursor struct {
}
+// InTransaction reports whether the session is in a transaction; the noop
+// test vcursor never is, so Concatenate tests exercise the parallel paths.
+func (t *noopVCursor) InTransaction() bool {
+ return false
+}
+
func (t *noopVCursor) SetCommitOrder(co vtgatepb.CommitOrder) {
//TODO implement me
panic("implement me")
@@ -101,6 +105,10 @@ func (t *noopVCursor) ExecutePrimitive(ctx context.Context, primitive Primitive,
return primitive.TryExecute(ctx, t, bindVars, wantfields)
}
+// ExecutePrimitiveStandalone executes the primitive directly; the noop test
+// vcursor has no separate standalone connection, so it behaves identically
+// to ExecutePrimitive.
+func (t *noopVCursor) ExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ return primitive.TryExecute(ctx, t, bindVars, wantfields)
+}
+
func (t *noopVCursor) StreamExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
return primitive.TryStreamExecute(ctx, t, bindVars, wantfields, callback)
}
@@ -240,10 +248,18 @@ func (t *noopVCursor) SetWorkload(querypb.ExecuteOptions_Workload) {
panic("implement me")
}
+func (t *noopVCursor) SetWorkloadName(string) {
+ panic("implement me") // stub to satisfy the SessionActions interface; not exercised by these tests
+}
+
func (t *noopVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) {
panic("implement me")
}
+func (t *noopVCursor) SetPriority(string) {
+ panic("implement me") // stub to satisfy the SessionActions interface; not exercised by these tests
+}
+
func (t *noopVCursor) SetTarget(string) error {
panic("implement me")
}
@@ -346,6 +362,10 @@ func (f *loggingVCursor) ExecutePrimitive(ctx context.Context, primitive Primiti
return primitive.TryExecute(ctx, f, bindVars, wantfields)
}
+func (f *loggingVCursor) ExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ return primitive.TryExecute(ctx, f, bindVars, wantfields) // for tests, standalone execution is identical to regular execution
+}
+
func (f *loggingVCursor) StreamExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
return primitive.TryStreamExecute(ctx, f, bindVars, wantfields, callback)
}
@@ -657,10 +677,18 @@ func (f *loggingVCursor) SetWorkload(querypb.ExecuteOptions_Workload) {
panic("implement me")
}
+func (f *loggingVCursor) SetWorkloadName(string) {
+ panic("implement me") // stub to satisfy the SessionActions interface; not exercised by these tests
+}
+
func (f *loggingVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) {
panic("implement me")
}
+func (f *loggingVCursor) SetPriority(string) {
+ panic("implement me") // stub to satisfy the SessionActions interface; not exercised by these tests
+}
+
func (f *loggingVCursor) FindRoutedTable(tbl sqlparser.TableName) (*vindexes.Table, error) {
f.log = append(f.log, fmt.Sprintf("FindTable(%s)", sqlparser.String(tbl)))
return f.tableRoutes.tbl, nil
diff --git a/go/vt/vtgate/engine/filter.go b/go/vt/vtgate/engine/filter.go
index f36467a7526..fb696a9d679 100644
--- a/go/vt/vtgate/engine/filter.go
+++ b/go/vt/vtgate/engine/filter.go
@@ -68,11 +68,8 @@ func (f *Filter) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[s
if err != nil {
return nil, err
}
- intEvalResult, err := evalResult.Value().ToInt64()
- if err != nil {
- return nil, err
- }
- if intEvalResult == 1 {
+
+ if evalResult.ToBoolean() {
rows = append(rows, row)
}
}
diff --git a/go/vt/vtgate/engine/join.go b/go/vt/vtgate/engine/join.go
index c67a0951b35..c47b523b9b1 100644
--- a/go/vt/vtgate/engine/join.go
+++ b/go/vt/vtgate/engine/join.go
@@ -22,6 +22,7 @@ import (
"strings"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/sync2"
querypb "vitess.io/vitess/go/vt/proto/query"
)
@@ -95,34 +96,36 @@ func (jn *Join) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st
// TryStreamExecute performs a streaming exec.
func (jn *Join) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
- joinVars := make(map[string]*querypb.BindVariable)
- err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, wantfields, func(lresult *sqltypes.Result) error {
+ var fieldNeeded sync2.AtomicBool
+ fieldNeeded.Set(wantfields)
+ err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, fieldNeeded.Get(), func(lresult *sqltypes.Result) error {
+ joinVars := make(map[string]*querypb.BindVariable)
for _, lrow := range lresult.Rows {
for k, col := range jn.Vars {
joinVars[k] = sqltypes.ValueBindVariable(lrow[col])
}
- rowSent := false
- err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), wantfields, func(rresult *sqltypes.Result) error {
+ var rowSent sync2.AtomicBool
+ err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), fieldNeeded.Get(), func(rresult *sqltypes.Result) error {
result := &sqltypes.Result{}
- if wantfields {
+ if fieldNeeded.Get() {
// This code is currently unreachable because the first result
// will always be just the field info, which will cause the outer
// wantfields code path to be executed. But this may change in the future.
- wantfields = false
+ fieldNeeded.Set(false)
result.Fields = joinFields(lresult.Fields, rresult.Fields, jn.Cols)
}
for _, rrow := range rresult.Rows {
result.Rows = append(result.Rows, joinRows(lrow, rrow, jn.Cols))
}
if len(rresult.Rows) != 0 {
- rowSent = true
+ rowSent.Set(true)
}
return callback(result)
})
if err != nil {
return err
}
- if jn.Opcode == LeftJoin && !rowSent {
+ if jn.Opcode == LeftJoin && !rowSent.Get() {
result := &sqltypes.Result{}
result.Rows = [][]sqltypes.Value{joinRows(
lrow,
@@ -132,8 +135,8 @@ func (jn *Join) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars
return callback(result)
}
}
- if wantfields {
- wantfields = false
+ if fieldNeeded.Get() {
+ fieldNeeded.Set(false)
for k := range jn.Vars {
joinVars[k] = sqltypes.NullBindVariable
}
diff --git a/go/vt/vtgate/engine/plan.go b/go/vt/vtgate/engine/plan.go
index d6e4ed1118e..769c69aaa06 100644
--- a/go/vt/vtgate/engine/plan.go
+++ b/go/vt/vtgate/engine/plan.go
@@ -17,6 +17,7 @@ limitations under the License.
package engine
import (
+ "bytes"
"encoding/json"
"sync/atomic"
"time"
@@ -98,5 +99,16 @@
 Errors: atomic.LoadUint64(&p.Errors),
 TablesUsed: p.TablesUsed,
 }
- return json.Marshal(marshalPlan)
+
+ b := new(bytes.Buffer)
+ enc := json.NewEncoder(b)
+ // Disable HTML escaping so <, > and & in queries stay readable in the marshalled plan.
+ enc.SetEscapeHTML(false)
+ err := enc.Encode(marshalPlan)
+ if err != nil {
+ return nil, err
+ }
+
+ // Encoder.Encode appends a trailing newline that json.Marshal never emits; strip it.
+ return bytes.TrimSuffix(b.Bytes(), []byte("\n")), nil
 }
diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go
index 4433dcd69dc..0e7929bbe0c 100644
--- a/go/vt/vtgate/engine/plan_description.go
+++ b/go/vt/vtgate/engine/plan_description.go
@@ -161,12 +161,15 @@ func addMap(input map[string]any, buf *bytes.Buffer) error {
 func marshalAdd(prepend string, buf *bytes.Buffer, name string, obj any) error {
 buf.WriteString(prepend + `"` + name + `":`)
- b, err := json.Marshal(obj)
- if err != nil {
- return err
- }
- buf.Write(b)
- return nil
+
+ enc := json.NewEncoder(buf)
+ // Keep <, > and & unescaped in the plan description output.
+ enc.SetEscapeHTML(false)
+ if err := enc.Encode(obj); err != nil {
+ return err
+ }
+ // Encoder.Encode appends a '\n' that json.Marshal would not; drop it so the
+ // hand-assembled JSON around this value stays well-formed.
+ buf.Truncate(buf.Len() - 1)
+ return nil
 }
// PrimitiveToPlanDescription transforms a primitive tree into a corresponding PlanDescription tree
diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go
index 796c1dcb551..0dfd0e30ff0 100644
--- a/go/vt/vtgate/engine/primitive.go
+++ b/go/vt/vtgate/engine/primitive.go
@@ -57,9 +57,15 @@ type (
Execute(ctx context.Context, method string, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error)
AutocommitApproval() bool
- // Primitive functions
+ // Execute the given primitive
ExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error)
+ // Execute the given primitive in a new autocommit session
+ ExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error)
+
+ // Execute the given primitive
StreamExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error
+ // Execute the given primitive in a new autocommit session
+ StreamExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(result *sqltypes.Result) error) error
// Shard-level functions.
ExecuteMultiShard(ctx context.Context, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, rollbackOnError, canAutocommit bool) (*sqltypes.Result, []error)
@@ -109,9 +115,6 @@ type (
// ReleaseLock releases all the held advisory locks.
ReleaseLock(ctx context.Context) error
-
- // StreamExecutePrimitiveStandalone executes the primitive in its own new autocommit session.
- StreamExecutePrimitiveStandalone(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(result *sqltypes.Result) error) error
}
// SessionActions gives primitives ability to interact with the session state
@@ -141,6 +144,8 @@ type (
SetTransactionMode(vtgatepb.TransactionMode)
SetWorkload(querypb.ExecuteOptions_Workload)
SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion)
+ SetWorkloadName(string)
+ SetPriority(string)
SetFoundRows(uint64)
SetDDLStrategy(string)
@@ -180,6 +185,10 @@ type (
// SetCommitOrder sets the commit order for the shard session in respect of the type of vindex lookup.
// This is used to select the right shard session to perform the vindex lookup query.
SetCommitOrder(co vtgatepb.CommitOrder)
+
+ // InTransaction returns true if the session has already opened transaction or
+ // will start a transaction on the query execution.
+ InTransaction() bool
}
// Match is used to check if a Primitive matches
diff --git a/go/vt/vtgate/engine/scalar_aggregation.go b/go/vt/vtgate/engine/scalar_aggregation.go
index 99fd21d8ea1..a1a76091689 100644
--- a/go/vt/vtgate/engine/scalar_aggregation.go
+++ b/go/vt/vtgate/engine/scalar_aggregation.go
@@ -122,7 +122,7 @@ func (sa *ScalarAggregate) TryExecute(ctx context.Context, vcursor VCursor, bind
}
out.Rows = [][]sqltypes.Value{resultRow}
- return out, nil
+ return out.Truncate(sa.TruncateColumnCount), nil
}
// TryStreamExecute implements the Primitive interface
diff --git a/go/vt/vtgate/engine/scalar_aggregation_test.go b/go/vt/vtgate/engine/scalar_aggregation_test.go
index 11e5b20a72b..15e72639f3d 100644
--- a/go/vt/vtgate/engine/scalar_aggregation_test.go
+++ b/go/vt/vtgate/engine/scalar_aggregation_test.go
@@ -106,16 +106,16 @@ func TestEmptyRows(outer *testing.T) {
func TestScalarAggregateStreamExecute(t *testing.T) {
assert := assert.New(t)
fields := sqltypes.MakeTestFields(
- "count(*)",
- "uint64",
+ "col|weight_string(col)",
+ "uint64|varbinary",
)
fp := &fakePrimitive{
allResultsInOneCall: true,
results: []*sqltypes.Result{
sqltypes.MakeTestResult(fields,
- "1",
+ "1|null",
), sqltypes.MakeTestResult(fields,
- "3",
+ "3|null",
)},
}
@@ -141,3 +141,34 @@ func TestScalarAggregateStreamExecute(t *testing.T) {
got := fmt.Sprintf("%v", results[1].Rows)
assert.Equal("[[UINT64(4)]]", got)
}
+
+// TestScalarAggregateExecuteTruncate checks if truncate works
+func TestScalarAggregateExecuteTruncate(t *testing.T) {
+ assert := assert.New(t)
+ fields := sqltypes.MakeTestFields(
+ "col|weight_string(col)",
+ "uint64|varbinary",
+ )
+
+ fp := &fakePrimitive{
+ allResultsInOneCall: true,
+ results: []*sqltypes.Result{
+ sqltypes.MakeTestResult(fields,
+ "1|null", "3|null",
+ )},
+ }
+
+ oa := &ScalarAggregate{
+ Aggregates: []*AggregateParams{{
+ Opcode: AggregateSum,
+ Col: 0,
+ }},
+ Input: fp,
+ TruncateColumnCount: 1, // only the aggregate column should survive truncation
+ PreProcess: true,
+ }
+
+ qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, true)
+ assert.NoError(err)
+ assert.Equal("[[UINT64(4)]]", fmt.Sprintf("%v", qr.Rows)) // weight_string column is truncated away
+}
diff --git a/go/vt/vtgate/engine/vindex_lookup.go b/go/vt/vtgate/engine/vindex_lookup.go
index 8883f138dcd..816507ae086 100644
--- a/go/vt/vtgate/engine/vindex_lookup.go
+++ b/go/vt/vtgate/engine/vindex_lookup.go
@@ -187,10 +187,17 @@ func (vr *VindexLookup) executeNonBatch(ctx context.Context, vcursor VCursor, id
bindVars := map[string]*querypb.BindVariable{
vr.Arguments[0]: vars,
}
- result, err := vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+
+ var result *sqltypes.Result
+ if vr.Vindex.AutoCommitEnabled() {
+ result, err = vcursor.ExecutePrimitiveStandalone(ctx, vr.Lookup, bindVars, false)
+ } else {
+ result, err = vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+ }
if err != nil {
return nil, err
}
+
rows := make([][]sqltypes.Value, 0, len(result.Rows))
for _, row := range result.Rows {
rows = append(rows, []sqltypes.Value{row[1]})
@@ -212,7 +219,15 @@ func (vr *VindexLookup) executeBatch(ctx context.Context, vcursor VCursor, ids [
 bindVars := map[string]*querypb.BindVariable{
 vr.Arguments[0]: vars,
 }
- result, err := vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+
+ var result *sqltypes.Result
+ // Run the lookup in a standalone autocommit session when the vindex asks for it.
+ if vr.Vindex.AutoCommitEnabled() {
+ result, err = vcursor.ExecutePrimitiveStandalone(ctx, vr.Lookup, bindVars, false)
+ } else {
+ result, err = vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false)
+ }
+
 if err != nil {
 return nil, vterrors.Wrapf(err, "failed while running the lookup query")
 }
diff --git a/go/vt/vtgate/evalengine/convert.go b/go/vt/vtgate/evalengine/convert.go
index e769196b62c..d1996fcb1ee 100644
--- a/go/vt/vtgate/evalengine/convert.go
+++ b/go/vt/vtgate/evalengine/convert.go
@@ -39,6 +39,10 @@ type (
)
func (c *ConvertExpr) unsupported() {
+ throwEvalError(c.returnUnsupportedError())
+}
+
+func (c *ConvertExpr) returnUnsupportedError() error {
var err error
switch {
case c.HasLength && c.HasScale:
@@ -48,7 +52,7 @@ func (c *ConvertExpr) unsupported() {
default:
err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", c.Type)
}
- throwEvalError(err)
+ return err
}
func (c *ConvertExpr) eval(env *ExpressionEnv, result *EvalResult) {
@@ -86,15 +90,9 @@ func (c *ConvertExpr) eval(env *ExpressionEnv, result *EvalResult) {
case "FLOAT":
if c.HasLength {
switch p := c.Length; {
- case p <= 24:
- c.unsupported()
- case p <= 53:
- result.makeFloat()
- default:
+ case p > 53:
throwEvalError(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 53.", p))
}
- } else {
- c.unsupported()
}
c.unsupported()
case "SIGNED", "SIGNED INTEGER":
diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go
index 2f2de033fdc..1971fdc39c2 100644
--- a/go/vt/vtgate/evalengine/eval_result.go
+++ b/go/vt/vtgate/evalengine/eval_result.go
@@ -309,6 +309,10 @@ func (er *EvalResult) isTextual() bool {
return sqltypes.IsText(tt) || sqltypes.IsBinary(tt)
}
+func (er *EvalResult) ToBoolean() bool {
+ return er.isTruthy() == boolTrue // boolNULL and boolFalse both map to false
+}
+
func (er *EvalResult) isTruthy() boolean {
if er.isNull() {
return boolNULL
diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go
index 6fdce977b08..d2ba9326911 100644
--- a/go/vt/vtgate/evalengine/translate.go
+++ b/go/vt/vtgate/evalengine/translate.go
@@ -493,6 +493,11 @@ func translateConvertExpr(expr sqlparser.Expr, convertType *sqlparser.ConvertTyp
if err != nil {
return nil, err
}
+ case "BINARY", "DOUBLE", "REAL", "SIGNED", "SIGNED INTEGER", "UNSIGNED", "UNSIGNED INTEGER":
+ // Supported types for conv expression
+ default:
+ // For unsupported types, we should return an error on translation instead of returning an error on runtime.
+ return nil, convert.returnUnsupportedError()
}
return &convert, nil
diff --git a/go/vt/vtgate/evalengine/translate_test.go b/go/vt/vtgate/evalengine/translate_test.go
index ce8249653dc..22cede333ba 100644
--- a/go/vt/vtgate/evalengine/translate_test.go
+++ b/go/vt/vtgate/evalengine/translate_test.go
@@ -340,3 +340,37 @@ func TestEvaluateTuple(t *testing.T) {
})
}
}
+
+// TestTranslationFailures tests that translation fails for functions that we don't support evaluation for.
+func TestTranslationFailures(t *testing.T) {
+ testcases := []struct {
+ expression string
+ expectedErr string
+ }{
+ {
+ expression: "cast('2023-01-07 12:34:56' as date)",
+ expectedErr: "Unsupported type conversion: DATE",
+ }, {
+ expression: "cast('2023-01-07 12:34:56' as datetime(5))",
+ expectedErr: "Unsupported type conversion: DATETIME(5)",
+ }, {
+ expression: "cast('3.4' as FLOAT)",
+ expectedErr: "Unsupported type conversion: FLOAT",
+ }, {
+ expression: "cast('3.4' as FLOAT(3))",
+ expectedErr: "Unsupported type conversion: FLOAT(3)",
+ },
+ }
+
+ for _, testcase := range testcases {
+ t.Run(testcase.expression, func(t *testing.T) {
+ // Given
+ stmt, err := sqlparser.Parse("select " + testcase.expression)
+ require.NoError(t, err)
+ astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr
+ _, err = Translate(astExpr, LookupDefaultCollation(45)) // the error must surface at translation time, not at evaluation
+ require.EqualError(t, err, testcase.expectedErr)
+ })
+ }
+
+}
diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go
index 8214361da4c..3f73f657666 100644
--- a/go/vt/vtgate/executor.go
+++ b/go/vt/vtgate/executor.go
@@ -115,6 +115,9 @@ type Executor struct {
// allowScatter will fail planning if set to false and a plan contains any scatter queries
allowScatter bool
+ // allowVstreamCopy will fail on vstream copy if false and no GTID provided for the stream.
+ // This is temporary until RDONLYs are properly supported for bootstrapping.
+ allowVstreamCopy bool
}
var executorOnce sync.Once
@@ -135,20 +138,22 @@ func NewExecutor(
schemaTracker SchemaInfo,
noScatter bool,
pv plancontext.PlannerVersion,
+ noVstreamCopy bool,
) *Executor {
e := &Executor{
- serv: serv,
- cell: cell,
- resolver: resolver,
- scatterConn: resolver.scatterConn,
- txConn: resolver.scatterConn.txConn,
- plans: cache.NewDefaultCacheImpl(cacheCfg),
- normalize: normalize,
- warnShardedOnly: warnOnShardedOnly,
- streamSize: streamSize,
- schemaTracker: schemaTracker,
- allowScatter: !noScatter,
- pv: pv,
+ serv: serv,
+ cell: cell,
+ resolver: resolver,
+ scatterConn: resolver.scatterConn,
+ txConn: resolver.scatterConn.txConn,
+ plans: cache.NewDefaultCacheImpl(cacheCfg),
+ normalize: normalize,
+ warnShardedOnly: warnOnShardedOnly,
+ streamSize: streamSize,
+ schemaTracker: schemaTracker,
+ allowScatter: !noScatter,
+ allowVstreamCopy: !noVstreamCopy,
+ pv: pv,
}
vschemaacl.Init()
@@ -985,6 +990,12 @@ func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string
}
ignoreMaxMemoryRows := sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt)
vcursor.SetIgnoreMaxMemoryRows(ignoreMaxMemoryRows)
+ vcursor.SetWorkloadName(sqlparser.GetWorkloadNameFromStatement(stmt))
+ priority, err := sqlparser.GetPriorityFromStatement(stmt)
+ if err != nil {
+ return nil, err
+ }
+ vcursor.SetPriority(priority)
setVarComment, err := prepareSetVarComment(vcursor, stmt)
if err != nil {
@@ -1324,7 +1335,7 @@ func (e *Executor) startVStream(ctx context.Context, rss []*srvtopo.ResolvedShar
return err
}
- vsm := newVStreamManager(e.resolver.resolver, e.serv, e.cell)
+ vsm := newVStreamManager(e.resolver.resolver, e.serv, e.cell, e.allowVstreamCopy)
vs := &vstream{
vgtid: vgtid,
tabletType: topodatapb.TabletType_PRIMARY,
@@ -1337,6 +1348,7 @@ func (e *Executor) startVStream(ctx context.Context, rss []*srvtopo.ResolvedShar
vsm: vsm,
eventCh: make(chan []*binlogdatapb.VEvent),
ts: ts,
+ copyCompletedShard: make(map[string]struct{}),
}
_ = vs.stream(ctx)
return nil
diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go
index 4a2a1e7cfec..5185ece673e 100644
--- a/go/vt/vtgate/executor_framework_test.go
+++ b/go/vt/vtgate/executor_framework_test.go
@@ -449,7 +449,7 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn
bad.VSchema = badVSchema
getSandbox(KsTestUnsharded).VSchema = unshardedVSchema
- executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3)
+ executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3, false)
key.AnyShardPicker = DestinationAnyShardPickerFirstShard{}
// create a new session each time so that ShardSessions don't get re-used across tests
@@ -473,7 +473,7 @@ func createCustomExecutor(vschema string) (executor *Executor, sbc1, sbc2, sbclo
sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil)
getSandbox(KsTestUnsharded).VSchema = unshardedVSchema
- executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3)
+ executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3, false)
// create a new session each time so that ShardSessions don't get re-used across tests
primarySession = &vtgatepb.Session{
TargetString: "@primary",
@@ -502,7 +502,7 @@ func createCustomExecutorSetValues(vschema string, values []*sqltypes.Result) (e
sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil)
getSandbox(KsTestUnsharded).VSchema = unshardedVSchema
- executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3)
+ executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3, false)
// create a new session each time so that ShardSessions don't get re-used across tests
primarySession = &vtgatepb.Session{
TargetString: "@primary",
diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go
index e116cd83988..a44d85cc3da 100644
--- a/go/vt/vtgate/executor_select_test.go
+++ b/go/vt/vtgate/executor_select_test.go
@@ -19,12 +19,15 @@ package vtgate
import (
"context"
"fmt"
+ "os"
"runtime"
"strconv"
"strings"
"testing"
"time"
+ _flag "vitess.io/vitess/go/internal/flag"
+
"vitess.io/vitess/go/vt/sqlparser"
"github.com/google/go-cmp/cmp"
@@ -1473,7 +1476,7 @@ func TestStreamSelectIN(t *testing.T) {
}
func createExecutor(serv *sandboxTopo, cell string, resolver *Resolver) *Executor {
- return NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3)
+ return NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3, false)
}
func TestSelectScatter(t *testing.T) {
@@ -2998,7 +3001,7 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) {
count++
}
- executor := NewExecutor(context.Background(), serv, cell, resolver, true, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3)
+ executor := NewExecutor(context.Background(), serv, cell, resolver, true, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3, false)
before := runtime.NumGoroutine()
query := "select id, col from user order by id limit 2"
@@ -3611,7 +3614,7 @@ func TestSelectAggregationData(t *testing.T) {
}{
{
sql: `select count(distinct col) from user`,
- sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "2", "3"),
+ sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|weight_string(col)", "int64|varbinary"), "1|NULL", "2|NULL", "2|NULL", "3|NULL"),
expSandboxQ: "select col, weight_string(col) from `user` group by col, weight_string(col) order by col asc",
expField: `[name:"count(distinct col)" type:INT64]`,
expRow: `[[INT64(3)]]`,
@@ -3625,14 +3628,14 @@ func TestSelectAggregationData(t *testing.T) {
},
{
sql: `select col, count(*) from user group by col`,
- sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)", "int64|int64"), "1|3"),
+ sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)|weight_string(col)", "int64|int64|varbinary"), "1|3|NULL"),
expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc",
expField: `[name:"col" type:INT64 name:"count(*)" type:INT64]`,
expRow: `[[INT64(1) INT64(24)]]`,
},
{
sql: `select col, count(*) from user group by col limit 2`,
- sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)", "int64|int64"), "1|2", "2|1", "3|4"),
+ sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)|weight_string(col)", "int64|int64|varbinary"), "1|2|NULL", "2|1|NULL", "3|4|NULL"),
expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc limit :__upper_limit",
expField: `[name:"col" type:INT64 name:"count(*)" type:INT64]`,
expRow: `[[INT64(1) INT64(16)] [INT64(2) INT64(8)]]`,
@@ -3740,3 +3743,74 @@ func TestSelectHexAndBit(t *testing.T) {
require.NoError(t, err)
require.Equal(t, `[[UINT64(10) UINT64(10) UINT64(10) UINT64(10)]]`, fmt.Sprintf("%v", qr.Rows))
}
+
+func TestSelectAggregationRandom(t *testing.T) {
+ cell := "aa"
+ hc := discovery.NewFakeHealthCheck(nil)
+ createSandbox(KsTestSharded).VSchema = executorVSchema
+ getSandbox(KsTestUnsharded).VSchema = unshardedVSchema
+ serv := newSandboxForCells([]string{cell})
+ resolver := newTestResolver(hc, serv, cell)
+ shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"}
+ var conns []*sandboxconn.SandboxConn
+ for _, shard := range shards {
+ sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil)
+ conns = append(conns, sbc)
+
+ sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields("a|b", "int64|int64"),
+ "null|null",
+ )})
+ }
+
+ conns[0].SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields("a|b", "int64|int64"),
+ "10|1", // the only non-null row across all shards, so the sums are a=10, b=1
+ )})
+
+ executor := createExecutor(serv, cell, resolver)
+ executor.pv = querypb.ExecuteOptions_Gen4
+ session := NewAutocommitSession(&vtgatepb.Session{})
+
+ rs, err := executor.Execute(context.Background(), "TestSelectCFC", session,
+ "select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as c from (select sum(a) as a, sum(b) as b from user) A", nil)
+ require.NoError(t, err)
+ assert.Equal(t, `[[INT64(10) INT64(1) DECIMAL(10.0000)]]`, fmt.Sprintf("%v", rs.Rows))
+}
+
+func TestMain(m *testing.M) {
+ _flag.ParseFlagsForTest() // parse test-only flags before any test in the package runs
+ os.Exit(m.Run())
+}
+
+func TestStreamJoinQuery(t *testing.T) {
+ // Special setup: Don't use createExecutorEnv.
+ cell := "aa"
+ hc := discovery.NewFakeHealthCheck(nil)
+ u := createSandbox(KsTestUnsharded)
+ s := createSandbox(KsTestSharded)
+ s.VSchema = executorVSchema
+ u.VSchema = unshardedVSchema
+ serv := newSandboxForCells([]string{cell})
+ resolver := newTestResolver(hc, serv, cell)
+ shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"}
+ for _, shard := range shards {
+ _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil)
+ }
+ executor := createExecutor(serv, cell, resolver)
+
+ sql := "select u.foo, u.apa, ue.bar, ue.apa from user u join user_extra ue on u.foo = ue.bar"
+ result, err := executorStream(executor, sql)
+ require.NoError(t, err)
+ wantResult := &sqltypes.Result{
+ Fields: append(sandboxconn.SingleRowResult.Fields, sandboxconn.SingleRowResult.Fields...),
+ }
+ wantRow := append(sandboxconn.StreamRowResult.Rows[0], sandboxconn.StreamRowResult.Rows[0]...)
+ for i := 0; i < 64; i++ { // 8 left-side shards x 8 right-side shards -> 64 joined rows expected
+ wantResult.Rows = append(wantResult.Rows, wantRow)
+ }
+ require.Equal(t, len(wantResult.Rows), len(result.Rows))
+ for idx := 0; idx < 64; idx++ {
+ utils.MustMatch(t, wantResult.Rows[idx], result.Rows[idx], "mismatched on: ", strconv.Itoa(idx))
+ }
+}
diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go
index 8fea4ed985f..ee3038972c3 100644
--- a/go/vt/vtgate/executor_stream_test.go
+++ b/go/vt/vtgate/executor_stream_test.go
@@ -61,7 +61,7 @@ func TestStreamSQLSharded(t *testing.T) {
for _, shard := range shards {
_ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil)
}
- executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3)
+ executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3, false)
sql := "stream * from sharded_user_msgs"
result, err := executorStreamMessages(executor, sql)
diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go
index 6e80f3841aa..6ec76f0dd2a 100644
--- a/go/vt/vtgate/executor_test.go
+++ b/go/vt/vtgate/executor_test.go
@@ -493,7 +493,6 @@ func TestExecutorShowColumns(t *testing.T) {
sbclookup.BatchQueries = nil
})
}
-
}
func TestExecutorShow(t *testing.T) {
@@ -1826,6 +1825,47 @@ func TestGetPlanNormalized(t *testing.T) {
assertCacheContains(t, r, want)
}
+// TestGetPlanPriority checks that the /*vt+ PRIORITY=... */ directive is parsed
+// during planning and propagated into the session's execute options.
+func TestGetPlanPriority(t *testing.T) {
+ testCases := []struct {
+ name string
+ sql string
+ expectedPriority string
+ expectedError error
+ }{
+ {name: "Invalid priority", sql: "select /*vt+ PRIORITY=something */ * from music_user_map", expectedPriority: "", expectedError: sqlparser.ErrInvalidPriority},
+ {name: "Valid priority", sql: "select /*vt+ PRIORITY=33 */ * from music_user_map", expectedPriority: "33", expectedError: nil},
+ {name: "empty priority", sql: "select * from music_user_map", expectedPriority: "", expectedError: nil},
+ }
+
+ session := NewSafeSession(&vtgatepb.Session{TargetString: "@unknown", Options: &querypb.ExecuteOptions{}})
+
+ for _, aTestCase := range testCases {
+ testCase := aTestCase
+
+ t.Run(testCase.name, func(t *testing.T) {
+ r, _, _, _ := createExecutorEnv()
+ r.normalize = true
+ logStats := logstats.NewLogStats(ctx, "Test", "", "", nil)
+ vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv)
+ assert.NoError(t, err)
+
+ stmt, err := sqlparser.Parse(testCase.sql)
+ assert.NoError(t, err)
+ priorityFromStatement, _ := sqlparser.GetPriorityFromStatement(stmt) // priority parsed straight from the statement's directive
+
+ _, err = r.getPlan(context.Background(), vCursor, testCase.sql, makeComments("/* some comment */"), map[string]*querypb.BindVariable{},
+ NewSafeSession(nil), logStats)
+ if testCase.expectedError != nil {
+ assert.ErrorIs(t, err, testCase.expectedError)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, testCase.expectedPriority, priorityFromStatement)
+ assert.Equal(t, testCase.expectedPriority, vCursor.safeSession.Options.Priority)
+ }
+ })
+ }
+}
+
func TestPassthroughDDL(t *testing.T) {
executor, sbc1, sbc2, _ := createExecutorEnv()
primarySession.TargetString = "TestExecutor"
@@ -2124,9 +2164,8 @@ func TestExecutorExplain(t *testing.T) {
result, err = executorExec(executor, "explain format = vitess select 42", bindVars)
require.NoError(t, err)
- expected :=
- `[[VARCHAR("Projection") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")] ` +
- `[VARCHAR("└─ SingleRow") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")]]`
+ expected := `[[VARCHAR("Projection") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")] ` +
+ `[VARCHAR("└─ SingleRow") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")]]`
require.Equal(t,
`[[VARCHAR("Projection") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")] `+
`[VARCHAR("└─ SingleRow") VARCHAR("") VARCHAR("") VARCHAR("") VARCHAR("UNKNOWN") VARCHAR("")]]`,
diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go
index a1b387e2ad5..ce89b9ca505 100644
--- a/go/vt/vtgate/executor_vschema_ddl_test.go
+++ b/go/vt/vtgate/executor_vschema_ddl_test.go
@@ -373,13 +373,13 @@ func TestExecutorAddSequenceDDL(t *testing.T) {
}
time.Sleep(10 * time.Millisecond)
- stmt = "alter vschema on test_table add auto_increment id using test_seq"
+ stmt = "alter vschema on test_table add auto_increment id using `db-name`.`test_seq`"
if _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil); err != nil {
t.Error(err)
}
time.Sleep(10 * time.Millisecond)
- wantAutoInc := &vschemapb.AutoIncrement{Column: "id", Sequence: "test_seq"}
+ wantAutoInc := &vschemapb.AutoIncrement{Column: "id", Sequence: "`db-name`.test_seq"}
gotAutoInc := executor.vm.GetCurrentSrvVschema().Keyspaces[ksSharded].Tables["test_table"].AutoIncrement
if !reflect.DeepEqual(wantAutoInc, gotAutoInc) {
@@ -579,6 +579,18 @@ func TestExecutorAddDropVindexDDL(t *testing.T) {
}
utils.MustMatch(t, wantqr, qr)
+ // now make sure we can create another vindex that references a table with dashes (i.e. escaping is necessary)
+ stmt = "alter vschema on test2 add vindex test_lookup_fqn(c1,c2) using consistent_lookup_unique with owner=`test`, from=`c1,c2`, table=`test-keyspace`.`lookup-fqn`, to=`keyspace_id`"
+ _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
+ require.NoError(t, err)
+
+ _, vindex = waitForVindex(t, ks, "test_lookup_fqn", vschemaUpdates, executor)
+ require.Equal(t, "consistent_lookup_unique", vindex.Type)
+ require.Equal(t, "test", vindex.Owner)
+ require.Equal(t, "c1,c2", vindex.Params["from"])
+ require.Equal(t, "`test-keyspace`.`lookup-fqn`", vindex.Params["table"])
+ require.Equal(t, "keyspace_id", vindex.Params["to"])
+
stmt = "alter vschema on test2 add vindex nonexistent (c1,c2)"
_, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil)
require.EqualError(t, err, "vindex nonexistent does not exist in keyspace TestExecutor")
diff --git a/go/vt/vtgate/grpcvtgateconn/conn.go b/go/vt/vtgate/grpcvtgateconn/conn.go
index 1f29d1c8c47..0fb76dfefe4 100644
--- a/go/vt/vtgate/grpcvtgateconn/conn.go
+++ b/go/vt/vtgate/grpcvtgateconn/conn.go
@@ -53,7 +53,6 @@ func init() {
"vtclient",
"vtcombo",
"vtctl",
- "vtctld",
"vttestserver",
} {
servenv.OnParseFor(cmd, registerFlags)
diff --git a/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go b/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go
index cf272fe3606..55a067807bd 100644
--- a/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go
+++ b/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go
@@ -108,6 +108,7 @@ func TestGRPCVTGateConnAuth(t *testing.T) {
fs := pflag.NewFlagSet("", pflag.ContinueOnError)
grpcclient.RegisterFlags(fs)
+ grpcclient.ResetStaticAuth()
err = fs.Parse([]string{
"--grpc_auth_static_client_creds",
f.Name(),
@@ -148,6 +149,7 @@ func TestGRPCVTGateConnAuth(t *testing.T) {
fs = pflag.NewFlagSet("", pflag.ContinueOnError)
grpcclient.RegisterFlags(fs)
+ grpcclient.ResetStaticAuth()
err = fs.Parse([]string{
"--grpc_auth_static_client_creds",
f.Name(),
diff --git a/go/vt/vtgate/planbuilder/abstract/concatenate.go b/go/vt/vtgate/planbuilder/abstract/concatenate.go
index d75e739906e..4dba7d33ac7 100644
--- a/go/vt/vtgate/planbuilder/abstract/concatenate.go
+++ b/go/vt/vtgate/planbuilder/abstract/concatenate.go
@@ -40,13 +40,27 @@ func (*Concatenate) iLogical() {}
func (c *Concatenate) TableID() semantics.TableSet {
var tableSet semantics.TableSet
for _, source := range c.Sources {
- tableSet.MergeInPlace(source.TableID())
+ tableSet = tableSet.Merge(source.TableID())
}
return tableSet
}
// PushPredicate implements the Operator interface
func (c *Concatenate) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
+ op, err := c.tryPush(expr, semTable)
+ if err == nil {
+ return op, nil
+ }
+
+ // if we fail to push down the predicate, we can always evaluate it at the vtgate level
+ filter := &Filter{
+ Source: c,
+ Predicates: []sqlparser.Expr{expr},
+ }
+ return filter, nil
+}
+
+func (c *Concatenate) tryPush(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
newSources := make([]LogicalOperator, 0, len(c.Sources))
for index, source := range c.Sources {
if len(c.SelectStmts[index].SelectExprs) != 1 {
diff --git a/go/vt/vtgate/planbuilder/abstract/derived.go b/go/vt/vtgate/planbuilder/abstract/derived.go
index 508d576f37d..003267df7ff 100644
--- a/go/vt/vtgate/planbuilder/abstract/derived.go
+++ b/go/vt/vtgate/planbuilder/abstract/derived.go
@@ -17,9 +17,9 @@ limitations under the License.
package abstract
import (
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "io"
+
"vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
@@ -44,8 +44,11 @@ func (d *Derived) TableID() semantics.TableSet {
func (d *Derived) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTable) (LogicalOperator, error) {
tableInfo, err := semTable.TableInfoForExpr(expr)
if err != nil {
- if err == semantics.ErrMultipleTables {
- return nil, semantics.ProjError{Inner: vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: unable to split predicates to derived table: %s", sqlparser.String(expr))}
+ if err == semantics.ErrNotSingleTable {
+ return &Filter{
+ Source: d,
+ Predicates: []sqlparser.Expr{expr},
+ }, nil
}
return nil, err
}
@@ -54,11 +57,30 @@ func (d *Derived) PushPredicate(expr sqlparser.Expr, semTable *semantics.SemTabl
if err != nil {
return nil, err
}
+ if !canBePushedDownIntoDerived(newExpr) {
+ // if we have an aggregation, we don't want to push it inside
+ return &Filter{Source: d, Predicates: []sqlparser.Expr{expr}}, nil
+ }
newSrc, err := d.Inner.PushPredicate(newExpr, semTable)
d.Inner = newSrc
return d, err
}
+func canBePushedDownIntoDerived(expr sqlparser.Expr) (canBePushed bool) {
+ canBePushed = true
+ _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ switch node.(type) {
+ case *sqlparser.Max, *sqlparser.Min:
+ // min/max are safe to push down, so leave canBePushed untouched
+ case sqlparser.AggrFunc:
+ canBePushed = false
+ return false, io.EOF
+ }
+ return true, nil
+ }, expr)
+ return
+}
+
// UnsolvedPredicates implements the Operator interface
func (d *Derived) UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr {
return d.Inner.UnsolvedPredicates(semTable)
diff --git a/go/vt/vtgate/planbuilder/abstract/queryprojection.go b/go/vt/vtgate/planbuilder/abstract/queryprojection.go
index 14572b117b2..51c074ff239 100644
--- a/go/vt/vtgate/planbuilder/abstract/queryprojection.go
+++ b/go/vt/vtgate/planbuilder/abstract/queryprojection.go
@@ -22,6 +22,8 @@ import (
"strings"
"vitess.io/vitess/go/vt/vtgate/engine"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/semantics"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
@@ -150,17 +152,14 @@ func CreateQPFromSelect(sel *sqlparser.Select) (*QueryProjection, error) {
}
for _, group := range sel.GroupBy {
selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(group)
- expr, weightStrExpr, err := qp.GetSimplifiedExpr(group)
- if err != nil {
- return nil, err
- }
+ weightStrExpr := qp.GetSimplifiedExpr(group)
err = checkForInvalidGroupingExpressions(weightStrExpr)
if err != nil {
return nil, err
}
groupBy := GroupBy{
- Inner: expr,
+ Inner: group,
WeightStrExpr: weightStrExpr,
InnerIndex: selectExprIdx,
aliasedExpr: aliasExpr,
@@ -272,17 +271,14 @@ func CreateQPFromUnion(union *sqlparser.Union) (*QueryProjection, error) {
func (qp *QueryProjection) addOrderBy(orderBy sqlparser.OrderBy) error {
canPushDownSorting := true
for _, order := range orderBy {
- expr, weightStrExpr, err := qp.GetSimplifiedExpr(order.Expr)
- if err != nil {
- return err
- }
+ weightStrExpr := qp.GetSimplifiedExpr(order.Expr)
if sqlparser.IsNull(weightStrExpr) {
// ORDER BY null can safely be ignored
continue
}
qp.OrderExprs = append(qp.OrderExprs, OrderBy{
Inner: &sqlparser.Order{
- Expr: expr,
+ Expr: order.Expr,
Direction: order.Direction,
},
WeightStrExpr: weightStrExpr,
@@ -314,39 +310,6 @@ func checkForInvalidAggregations(exp *sqlparser.AliasedExpr) error {
}, exp.Expr)
}
-func (qp *QueryProjection) getNonAggrExprNotMatchingGroupByExprs() sqlparser.SelectExpr {
- for _, expr := range qp.SelectExprs {
- if expr.Aggr {
- continue
- }
- if !qp.isExprInGroupByExprs(expr) {
- return expr.Col
- }
- }
- for _, order := range qp.OrderExprs {
- if !qp.isOrderByExprInGroupBy(order) {
- return &sqlparser.AliasedExpr{
- Expr: order.Inner.Expr,
- }
- }
- }
- return nil
-}
-
-func (qp *QueryProjection) isOrderByExprInGroupBy(order OrderBy) bool {
- // ORDER BY NULL or Aggregation functions need not be present in group by
- _, isAggregate := order.WeightStrExpr.(sqlparser.AggrFunc)
- if sqlparser.IsNull(order.Inner.Expr) || isAggregate {
- return true
- }
- for _, groupByExpr := range qp.groupByExprs {
- if sqlparser.EqualsExpr(groupByExpr.WeightStrExpr, order.WeightStrExpr) {
- return true
- }
- }
- return false
-}
-
func (qp *QueryProjection) isExprInGroupByExprs(expr SelectExpr) bool {
for _, groupByExpr := range qp.groupByExprs {
exp, err := expr.GetExpr()
@@ -361,34 +324,47 @@ func (qp *QueryProjection) isExprInGroupByExprs(expr SelectExpr) bool {
}
// GetSimplifiedExpr takes an expression used in ORDER BY or GROUP BY, and returns an expression that is simpler to evaluate
-func (qp *QueryProjection) GetSimplifiedExpr(e sqlparser.Expr) (expr sqlparser.Expr, weightStrExpr sqlparser.Expr, err error) {
+func (qp *QueryProjection) GetSimplifiedExpr(e sqlparser.Expr) (found sqlparser.Expr) {
+ if qp == nil {
+ return e
+ }
// If the ORDER BY is against a column alias, we need to remember the expression
// behind the alias. The weightstring(.) calls needs to be done against that expression and not the alias.
// Eg - select music.foo as bar, weightstring(music.foo) from music order by bar
- colExpr, isColName := e.(*sqlparser.ColName)
- if !isColName {
- return e, e, nil
- }
-
- if sqlparser.IsNull(e) {
- return e, nil, nil
+ in, isColName := e.(*sqlparser.ColName)
+ if !(isColName && in.Qualifier.IsEmpty()) {
+ // we are only interested in unqualified column names. if it's not a column name, or it is qualified, we're done
+ return e
}
- if colExpr.Qualifier.IsEmpty() {
- for _, selectExpr := range qp.SelectExprs {
- aliasedExpr, isAliasedExpr := selectExpr.Col.(*sqlparser.AliasedExpr)
- if !isAliasedExpr {
+ for _, selectExpr := range qp.SelectExprs {
+ ae, ok := selectExpr.Col.(*sqlparser.AliasedExpr)
+ if !ok {
+ continue
+ }
+ aliased := !ae.As.IsEmpty()
+ if aliased {
+ if in.Name.Equal(ae.As) {
+ return ae.Expr
+ }
+ } else {
+ seCol, ok := ae.Expr.(*sqlparser.ColName)
+ if !ok {
continue
}
- isAliasExpr := !aliasedExpr.As.IsEmpty()
- if isAliasExpr && colExpr.Name.Equal(aliasedExpr.As) {
- return e, aliasedExpr.Expr, nil
+ if seCol.Name.Equal(in.Name) {
+ // If the column name matches, we have a match, even if the table name is not listed
+ return ae.Expr
}
}
}
- return e, e, nil
+ if found == nil {
+ found = e
+ }
+
+ return found
}
// toString should only be used for tests
@@ -457,10 +433,7 @@ func (qp *QueryProjection) NeedsDistinct() bool {
func (qp *QueryProjection) AggregationExpressions() (out []Aggr, err error) {
orderBy:
for _, orderExpr := range qp.OrderExprs {
- if qp.isOrderByExprInGroupBy(orderExpr) {
- continue orderBy
- }
- orderExpr := orderExpr.Inner.Expr
+ orderExpr := orderExpr.WeightStrExpr
for _, expr := range qp.SelectExprs {
col, ok := expr.Col.(*sqlparser.AliasedExpr)
if !ok {
@@ -611,6 +584,85 @@ func (qp *QueryProjection) GetColumnCount() int {
return len(qp.SelectExprs) - qp.AddedColumn
}
+// NeedsProjecting returns true if we have projections that need to be evaluated at the vtgate level
+// and can't be pushed down to MySQL
+func (qp *QueryProjection) NeedsProjecting(
+ ctx *plancontext.PlanningContext,
+ pusher func(expr *sqlparser.AliasedExpr) (int, error),
+) (needsVtGateEval bool, expressions []sqlparser.Expr, colNames []string, err error) {
+ for _, se := range qp.SelectExprs {
+ var ae *sqlparser.AliasedExpr
+ ae, err = se.GetAliasedExpr()
+ if err != nil {
+ return false, nil, nil, err
+ }
+
+ expr := ae.Expr
+ colNames = append(colNames, ae.ColumnName())
+
+ if _, isCol := expr.(*sqlparser.ColName); isCol {
+ offset, err := pusher(ae)
+ if err != nil {
+ return false, nil, nil, err
+ }
+ expressions = append(expressions, sqlparser.NewOffset(offset, expr))
+ continue
+ }
+
+ rExpr := sqlparser.Rewrite(sqlparser.CloneExpr(expr), func(cursor *sqlparser.Cursor) bool {
+ col, isCol := cursor.Node().(*sqlparser.ColName)
+ if !isCol {
+ return true
+ }
+ var tableInfo semantics.TableInfo
+ tableInfo, err = ctx.SemTable.TableInfoForExpr(col)
+ if err != nil {
+ return true
+ }
+ _, isDT := tableInfo.(*semantics.DerivedTable)
+ if !isDT {
+ return true
+ }
+
+ var rewritten sqlparser.Expr
+ rewritten, err = semantics.RewriteDerivedTableExpression(col, tableInfo)
+ if err != nil {
+ return false
+ }
+ if sqlparser.ContainsAggregation(rewritten) {
+ offset, tErr := pusher(&sqlparser.AliasedExpr{Expr: col})
+ if tErr != nil {
+ err = tErr
+ return false
+ }
+
+ cursor.Replace(sqlparser.NewOffset(offset, col))
+ }
+ return true
+ }, nil).(sqlparser.Expr)
+
+ if err != nil {
+ return
+ }
+
+ if !sqlparser.EqualsExpr(rExpr, expr) {
+ // if we changed the expression, it means that we have to evaluate the rest at the vtgate level
+ expressions = append(expressions, rExpr)
+ needsVtGateEval = true
+ continue
+ }
+
+ // we did not need to push any parts of this expression down. Let's check if we can push all of it
+ offset, err := pusher(ae)
+ if err != nil {
+ return false, nil, nil, err
+ }
+ expressions = append(expressions, sqlparser.NewOffset(offset, expr))
+ }
+
+ return
+}
+
func checkForInvalidGroupingExpressions(expr sqlparser.Expr) error {
return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) {
if _, isAggregate := node.(sqlparser.AggrFunc); isAggregate {
diff --git a/go/vt/vtgate/planbuilder/aggregation_pushing.go b/go/vt/vtgate/planbuilder/aggregation_pushing.go
index e719c72fdfa..5e9da131063 100644
--- a/go/vt/vtgate/planbuilder/aggregation_pushing.go
+++ b/go/vt/vtgate/planbuilder/aggregation_pushing.go
@@ -429,6 +429,10 @@ func isMinOrMax(in engine.AggregateOpcode) bool {
}
}
+func isRandom(in engine.AggregateOpcode) bool {
+ return in == engine.AggregateRandom
+}
+
func splitAggregationsToLeftAndRight(
ctx *plancontext.PlanningContext,
aggregations []abstract.Aggr,
@@ -443,8 +447,8 @@ func splitAggregationsToLeftAndRight(
} else {
deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr)
var other *abstract.Aggr
- // if we are sending down min/max, we don't have to multiply the results with anything
- if !isMinOrMax(aggr.OpCode) {
+ // if we are sending down min/max/random, we don't have to multiply the results with anything
+ if !isMinOrMax(aggr.OpCode) && !isRandom(aggr.OpCode) {
other = countStarAggr()
}
switch {
diff --git a/go/vt/vtgate/planbuilder/collations_test.go b/go/vt/vtgate/planbuilder/collations_test.go
index e86cf6aaff5..2a7ffebf91c 100644
--- a/go/vt/vtgate/planbuilder/collations_test.go
+++ b/go/vt/vtgate/planbuilder/collations_test.go
@@ -40,7 +40,7 @@ type collationTestCase struct {
func (tc *collationTestCase) run(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", false),
+ v: loadSchema(t, "vschemas/schema.json", false),
sysVarEnabled: true,
version: Gen4,
}
diff --git a/go/vt/vtgate/planbuilder/concatenateGen4.go b/go/vt/vtgate/planbuilder/concatenateGen4.go
index 152e4150961..72b27ef3450 100644
--- a/go/vt/vtgate/planbuilder/concatenateGen4.go
+++ b/go/vt/vtgate/planbuilder/concatenateGen4.go
@@ -104,7 +104,7 @@ func (c *concatenateGen4) Rewrite(inputs ...logicalPlan) error {
func (c *concatenateGen4) ContainsTables() semantics.TableSet {
var tableSet semantics.TableSet
for _, source := range c.sources {
- tableSet.MergeInPlace(source.ContainsTables())
+ tableSet = tableSet.Merge(source.ContainsTables())
}
return tableSet
}
diff --git a/go/vt/vtgate/planbuilder/gen4_planner.go b/go/vt/vtgate/planbuilder/gen4_planner.go
index 924169fc966..67604f4e9be 100644
--- a/go/vt/vtgate/planbuilder/gen4_planner.go
+++ b/go/vt/vtgate/planbuilder/gen4_planner.go
@@ -222,13 +222,13 @@ func newBuildSelectPlan(
return nil, nil, err
}
- plan = optimizePlan(plan)
-
plan, err = planHorizon(ctx, plan, selStmt, true)
if err != nil {
return nil, nil, err
}
+ optimizePlan(plan)
+
sel, isSel := selStmt.(*sqlparser.Select)
if isSel {
if err := setMiscFunc(plan, sel); err != nil {
@@ -249,25 +249,25 @@ func newBuildSelectPlan(
}
// optimizePlan removes unnecessary simpleProjections that have been created while planning
-func optimizePlan(plan logicalPlan) logicalPlan {
- newPlan, _ := visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) {
- this, ok := plan.(*simpleProjection)
- if !ok {
- return true, plan, nil
- }
+func optimizePlan(plan logicalPlan) {
+ for _, lp := range plan.Inputs() {
+ optimizePlan(lp)
+ }
- input, ok := this.input.(*simpleProjection)
- if !ok {
- return true, plan, nil
- }
+ this, ok := plan.(*simpleProjection)
+ if !ok {
+ return
+ }
- for i, col := range this.eSimpleProj.Cols {
- this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col]
- }
- this.input = input.input
- return true, this, nil
- })
- return newPlan
+ input, ok := this.input.(*simpleProjection)
+ if !ok {
+ return
+ }
+
+ for i, col := range this.eSimpleProj.Cols {
+ this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col]
+ }
+ this.input = input.input
}
func gen4UpdateStmtPlanner(
diff --git a/go/vt/vtgate/planbuilder/horizon_planning.go b/go/vt/vtgate/planbuilder/horizon_planning.go
index d2c17436a00..550d3334eaa 100644
--- a/go/vt/vtgate/planbuilder/horizon_planning.go
+++ b/go/vt/vtgate/planbuilder/horizon_planning.go
@@ -59,7 +59,8 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
// a simpleProjection. We create a new Route that contains the derived table in the
// FROM clause. Meaning that, when we push expressions to the select list of this
// new Route, we do not want them to rewrite them.
- if _, isSimpleProj := plan.(*simpleProjection); isSimpleProj {
+ sp, derivedTable := plan.(*simpleProjection)
+ if derivedTable {
oldRewriteDerivedExpr := ctx.RewriteDerivedExpr
defer func() {
ctx.RewriteDerivedExpr = oldRewriteDerivedExpr
@@ -74,10 +75,11 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
}
needsOrdering := len(hp.qp.OrderExprs) > 0
- canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering
// If we still have a HAVING clause, it's because it could not be pushed to the WHERE,
// so it probably has aggregations
+ canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering
+
switch {
case hp.qp.NeedsAggregation() || hp.sel.Having != nil:
plan, err = hp.planAggregations(ctx, plan)
@@ -91,6 +93,26 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo
if err != nil {
return nil, err
}
+ case derivedTable:
+ pusher := func(ae *sqlparser.AliasedExpr) (int, error) {
+ offset, _, err := pushProjection(ctx, ae, sp.input, true, true, false)
+ return offset, err
+ }
+ needsVtGate, projections, colNames, err := hp.qp.NeedsProjecting(ctx, pusher)
+ if err != nil {
+ return nil, err
+ }
+ if !needsVtGate {
+ break
+ }
+
+ // there were some expressions we could not push down entirely,
+ // so replace the simpleProjection with a real projection
+ plan = &projection{
+ source: sp.input,
+ columns: projections,
+ columnNames: colNames,
+ }
default:
err = pushProjections(ctx, plan, hp.qp.SelectExprs)
if err != nil {
@@ -204,7 +226,7 @@ func checkIfAlreadyExists(expr *sqlparser.AliasedExpr, node sqlparser.SelectStat
selectExprDep := semTable.RecursiveDeps(selectExpr.Expr)
// Check that the two expressions have the same dependencies
- if !selectExprDep.Equals(exprDep) {
+ if selectExprDep != exprDep {
continue
}
@@ -395,7 +417,13 @@ func generateAggregateParams(aggrs []abstract.Aggr, aggrParamOffsets [][]offsets
aggrExpr = &sqlparser.BinaryExpr{
Operator: sqlparser.MultOp,
Left: aggrExpr,
- Right: curr,
+ Right: &sqlparser.FuncExpr{
+ Name: sqlparser.NewIdentifierCI("coalesce"),
+ Exprs: sqlparser.SelectExprs{
+ &sqlparser.AliasedExpr{Expr: curr},
+ &sqlparser.AliasedExpr{Expr: sqlparser.NewIntLiteral("1")},
+ },
+ },
}
}
}
@@ -503,10 +531,8 @@ func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext,
continue
}
- inner, innerWS, err := hp.qp.GetSimplifiedExpr(expr.Func.GetArg())
- if err != nil {
- return nil, nil, nil, err
- }
+ inner := expr.Func.GetArg()
+ innerWS := hp.qp.GetSimplifiedExpr(inner)
if exprHasVindex(ctx.SemTable, innerWS, false) {
aggrs = append(aggrs, expr)
continue
@@ -562,13 +588,10 @@ func newOffset(col int) offsets {
func (hp *horizonPlanning) createGroupingsForColumns(columns []*sqlparser.ColName) ([]abstract.GroupBy, error) {
var lhsGrouping []abstract.GroupBy
for _, lhsColumn := range columns {
- expr, wsExpr, err := hp.qp.GetSimplifiedExpr(lhsColumn)
- if err != nil {
- return nil, err
- }
+ wsExpr := hp.qp.GetSimplifiedExpr(lhsColumn)
lhsGrouping = append(lhsGrouping, abstract.GroupBy{
- Inner: expr,
+ Inner: lhsColumn,
WeightStrExpr: wsExpr,
})
}
diff --git a/go/vt/vtgate/planbuilder/operator_to_query.go b/go/vt/vtgate/planbuilder/operator_to_query.go
index 87dec2d8526..1e2d4ff54c4 100644
--- a/go/vt/vtgate/planbuilder/operator_to_query.go
+++ b/go/vt/vtgate/planbuilder/operator_to_query.go
@@ -89,7 +89,7 @@ func buildQuery(op abstract.PhysicalOperator, qb *queryBuilder) {
sel.Limit = opQuery.Limit
sel.OrderBy = opQuery.OrderBy
sel.GroupBy = opQuery.GroupBy
- sel.Having = opQuery.Having
+ sel.Having = mergeHaving(sel.Having, opQuery.Having)
sel.SelectExprs = opQuery.SelectExprs
qb.addTableExpr(op.Alias, op.Alias, op.TableID(), &sqlparser.DerivedTable{
Select: sel,
@@ -161,12 +161,16 @@ func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) {
}
sel := qb.sel.(*sqlparser.Select)
- if sel.Where == nil {
- sel.AddWhere(expr)
- return
+ _, isSubQuery := expr.(*sqlparser.ExtractedSubquery)
+ var addPred func(sqlparser.Expr)
+
+ if sqlparser.ContainsAggregation(expr) && !isSubQuery {
+ addPred = sel.AddHaving
+ } else {
+ addPred = sel.AddWhere
}
for _, exp := range sqlparser.SplitAndExpression(nil, expr) {
- sel.AddWhere(exp)
+ addPred(exp)
}
}
@@ -288,3 +292,17 @@ func (ts *tableSorter) Less(i, j int) bool {
func (ts *tableSorter) Swap(i, j int) {
ts.sel.From[i], ts.sel.From[j] = ts.sel.From[j], ts.sel.From[i]
}
+
+func mergeHaving(h1, h2 *sqlparser.Where) *sqlparser.Where {
+ switch {
+ case h1 == nil && h2 == nil:
+ return nil
+ case h1 == nil:
+ return h2
+ case h2 == nil:
+ return h1
+ default:
+ h1.Expr = sqlparser.AndExpressions(h1.Expr, h2.Expr)
+ return h1
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go
index 5f6d0e5da96..7659bb09cfa 100644
--- a/go/vt/vtgate/planbuilder/operator_transformers.go
+++ b/go/vt/vtgate/planbuilder/operator_transformers.go
@@ -409,25 +409,22 @@ func pushWeightStringForDistinct(ctx *plancontext.PlanningContext, plan logicalP
}
node.noNeedToTypeCheck = append(node.noNeedToTypeCheck, newOffset)
case *joinGen4:
- lhsSolves := node.Left.ContainsTables()
- rhsSolves := node.Right.ContainsTables()
- expr := node.OutputColumns()[offset]
- aliasedExpr, isAliased := expr.(*sqlparser.AliasedExpr)
- if !isAliased {
- return 0, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "cannot convert select expression to an aliased expression")
- }
- deps := ctx.SemTable.RecursiveDeps(aliasedExpr.Expr)
+ joinOffset := node.Cols[offset]
switch {
- case deps.IsSolvedBy(lhsSolves):
- offset, err = pushWeightStringForDistinct(ctx, node.Left, offset)
- node.Cols = append(node.Cols, -(offset + 1))
- case deps.IsSolvedBy(rhsSolves):
- offset, err = pushWeightStringForDistinct(ctx, node.Right, offset)
- node.Cols = append(node.Cols, offset+1)
+ case joinOffset < 0:
+ offset, err = pushWeightStringForDistinct(ctx, node.Left, -(joinOffset + 1))
+ offset = -(offset + 1)
+ case joinOffset > 0:
+ offset, err = pushWeightStringForDistinct(ctx, node.Right, joinOffset-1)
+ offset = offset + 1
default:
- return 0, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "cannot push distinct weight string to both sides of the join")
+ return 0, vterrors.New(vtrpcpb.Code_INTERNAL, "[BUG]: wrong column offset in join plan to push DISTINCT WEIGHT_STRING")
}
- newOffset = len(node.Cols) - 1
+ if err != nil {
+ return 0, err
+ }
+ newOffset = len(node.Cols)
+ node.Cols = append(node.Cols, offset)
default:
return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "bug: not supported pushWeightStringForDistinct on %T", plan)
}
diff --git a/go/vt/vtgate/planbuilder/physical/operator_funcs.go b/go/vt/vtgate/planbuilder/physical/operator_funcs.go
index 16757c041b1..1ea8d88822a 100644
--- a/go/vt/vtgate/planbuilder/physical/operator_funcs.go
+++ b/go/vt/vtgate/planbuilder/physical/operator_funcs.go
@@ -125,8 +125,11 @@ func PushPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op abs
case *Derived:
tableInfo, err := ctx.SemTable.TableInfoForExpr(expr)
if err != nil {
- if err == semantics.ErrMultipleTables {
- return nil, semantics.ProjError{Inner: vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: unable to split predicates to derived table: %s", sqlparser.String(expr))}
+ if err == semantics.ErrNotSingleTable {
+ return &Filter{
+ Source: op,
+ Predicates: []sqlparser.Expr{expr},
+ }, nil
}
return nil, err
}
@@ -228,11 +231,10 @@ func PushOutputColumns(ctx *plancontext.PlanningContext, op abstract.PhysicalOpe
var pos int
op.ColumnsOffset, pos = addToIntSlice(op.ColumnsOffset, i)
offsets = append(offsets, pos)
- // skip adding to columns as it exists already.
+ op.Columns = append(op.Columns, col)
if i > -1 {
continue
}
- op.Columns = append(op.Columns, col)
noQualifierNames = append(noQualifierNames, sqlparser.NewColName(col.Name.String()))
}
if len(noQualifierNames) > 0 {
@@ -345,7 +347,7 @@ func BreakExpressionInLHSandRHS(
switch node := cursor.Node().(type) {
case *sqlparser.ColName:
deps := ctx.SemTable.RecursiveDeps(node)
- if deps.NumberOfTables() == 0 {
+ if deps.IsEmpty() {
err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown column. has the AST been copied?")
return false
}
diff --git a/go/vt/vtgate/planbuilder/physical/route_planning.go b/go/vt/vtgate/planbuilder/physical/route_planning.go
index caedb1ab184..6e177ee71ba 100644
--- a/go/vt/vtgate/planbuilder/physical/route_planning.go
+++ b/go/vt/vtgate/planbuilder/physical/route_planning.go
@@ -226,6 +226,19 @@ func createPhysicalOperatorFromDelete(ctx *plancontext.PlanningContext, op *abst
return nil, err
}
+ if !vindexTable.Keyspace.Sharded {
+ return &Route{
+ Source: &Delete{
+ QTable: op.Table,
+ VTable: vindexTable,
+ AST: op.AST,
+ },
+ RouteOpCode: opCode,
+ Keyspace: vindexTable.Keyspace,
+ TargetDestination: dest,
+ }, nil
+ }
+
primaryVindex, vindexAndPredicates, err := getVindexInformation(op.TableID(), op.Table.Predicates, vindexTable)
if err != nil {
return nil, err
diff --git a/go/vt/vtgate/planbuilder/physical/subquery_planning.go b/go/vt/vtgate/planbuilder/physical/subquery_planning.go
index dd27c7fa7ae..26a77e12db5 100644
--- a/go/vt/vtgate/planbuilder/physical/subquery_planning.go
+++ b/go/vt/vtgate/planbuilder/physical/subquery_planning.go
@@ -84,21 +84,6 @@ func optimizeSubQuery(ctx *plancontext.PlanningContext, op *abstract.SubQuery) (
func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Route, subq *abstract.SubQueryInner) (*Route, error) {
subq.ExtractedSubquery.NeedsRewrite = true
-
- // go over the subquery and add its tables to the one's solved by the route it is merged with
- // this is needed to so that later when we try to push projections, we get the correct
- // solved tableID from the route, since it also includes the tables from the subquery after merging
- err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
- switch n := node.(type) {
- case *sqlparser.AliasedTableExpr:
- ts := outer.TableID()
- ts.MergeInPlace(ctx.SemTable.TableSetFor(n))
- }
- return true, nil
- }, subq.ExtractedSubquery.Subquery)
- if err != nil {
- return nil, err
- }
outer.SysTableTableSchema = append(outer.SysTableTableSchema, inner.SysTableTableSchema...)
for k, v := range inner.SysTableTableName {
if outer.SysTableTableName == nil {
@@ -126,7 +111,7 @@ func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Rout
}
}
- err = outer.resetRoutingSelections(ctx)
+ err := outer.resetRoutingSelections(ctx)
if err != nil {
return nil, err
}
@@ -353,11 +338,9 @@ func rewriteColumnsInSubqueryOpForJoin(
// update the dependencies for the subquery by removing the dependencies from the innerOp
tableSet := ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery]
- tableSet.RemoveInPlace(resultInnerOp.TableID())
- ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet
+ ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(resultInnerOp.TableID())
tableSet = ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery]
- tableSet.RemoveInPlace(resultInnerOp.TableID())
- ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet
+ ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(resultInnerOp.TableID())
// return any error while rewriting
return resultInnerOp, rewriteError
@@ -388,7 +371,7 @@ func createCorrelatedSubqueryOp(
// we do so by checking that the column names are the same and their recursive dependencies are the same
// so if the column names user.a and a would also be equal if the latter is also referencing the user table
for colName, bindVar := range bindVars {
- if node.Name.Equal(colName.Name) && ctx.SemTable.RecursiveDeps(node).Equals(ctx.SemTable.RecursiveDeps(colName)) {
+ if node.Name.Equal(colName.Name) && (ctx.SemTable.RecursiveDeps(node) == ctx.SemTable.RecursiveDeps(colName)) {
cursor.Replace(sqlparser.NewArgument(bindVar))
return false
}
diff --git a/go/vt/vtgate/planbuilder/physical/union.go b/go/vt/vtgate/planbuilder/physical/union.go
index 951314efe28..533d3e99bdd 100644
--- a/go/vt/vtgate/planbuilder/physical/union.go
+++ b/go/vt/vtgate/planbuilder/physical/union.go
@@ -37,7 +37,7 @@ var _ abstract.PhysicalOperator = (*Union)(nil)
func (u *Union) TableID() semantics.TableSet {
ts := semantics.EmptyTableSet()
for _, source := range u.Sources {
- ts.MergeInPlace(source.TableID())
+ ts = ts.Merge(source.TableID())
}
return ts
}
diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go
index 8ac609dc4e4..adffdfc8c22 100644
--- a/go/vt/vtgate/planbuilder/plan_test.go
+++ b/go/vt/vtgate/planbuilder/plan_test.go
@@ -17,18 +17,22 @@ limitations under the License.
package planbuilder
import (
- "bufio"
+ "bytes"
"context"
"encoding/json"
"errors"
"fmt"
- "io"
"math/rand"
"os"
+ "path/filepath"
"runtime/debug"
"strings"
"testing"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/servenv"
+
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
"vitess.io/vitess/go/test/utils"
@@ -108,6 +112,7 @@ func (*nameLkpIndex) Cost() int { return 3 }
func (*nameLkpIndex) IsUnique() bool { return false }
func (*nameLkpIndex) NeedsVCursor() bool { return false }
func (*nameLkpIndex) AllowBatch() bool { return true }
+func (*nameLkpIndex) AutoCommitEnabled() bool { return false }
func (*nameLkpIndex) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL }
func (*nameLkpIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) {
return []bool{}, nil
@@ -208,28 +213,15 @@ func init() {
vindexes.Register("multiCol_test", newMultiColIndex)
}
-const (
- samePlanMarker = "Gen4 plan same as above\n"
- gen4ErrorPrefix = "Gen4 error: "
-)
-
func makeTestOutput(t *testing.T) string {
testOutputTempDir := utils.MakeTestOutput(t, "testdata", "plan_test")
- t.Cleanup(func() {
- if !t.Failed() {
- _ = os.RemoveAll(testOutputTempDir)
- } else {
- t.Logf("Errors found in plantests. If the output is correct, run `cp %s/* testdata/` to update test expectations", testOutputTempDir)
- }
- })
-
return testOutputTempDir
}
func TestPlan(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
sysVarEnabled: true,
}
testOutputTempDir := makeTestOutput(t)
@@ -240,126 +232,136 @@ func TestPlan(t *testing.T) {
// the column is named as Id. This is to make sure that
// column names are case-preserved, but treated as
// case-insensitive even if they come from the vschema.
- testFile(t, "aggr_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "dml_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "from_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "filter_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "postprocess_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "select_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "symtab_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "unsupported_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "vindex_func_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "wireup_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "memory_sort_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "use_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "set_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "union_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "transaction_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "lock_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "large_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "ddl_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "flush_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "show_cases_no_default_keyspace.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "stream_cases.txt", testOutputTempDir, vschemaWrapper, false)
- testFile(t, "systemtables_cases.txt", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "aggr_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "dml_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "from_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "filter_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "postprocess_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "select_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "symtab_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "unsupported_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "vindex_func_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "wireup_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "memory_sort_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "use_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "set_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "union_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "large_union_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "transaction_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "lock_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "large_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "ddl_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "flush_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "show_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "stream_cases.json", testOutputTempDir, vschemaWrapper, false)
+ testFile(t, "systemtables_cases80.json", testOutputTempDir, vschemaWrapper, false)
+}
+
+func TestSystemTables57(t *testing.T) {
+ // first we move everything to use 5.7 logic
+ servenv.SetMySQLServerVersionForTest("5.7")
+ defer servenv.SetMySQLServerVersionForTest("")
+ vschemaWrapper := &vschemaWrapper{v: loadSchema(t, "vschemas/schema.json", true)}
+ testOutputTempDir := makeTestOutput(t)
+ testFile(t, "systemtables_cases57.json", testOutputTempDir, vschemaWrapper, false)
}
func TestSysVarSetDisabled(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
sysVarEnabled: false,
}
- testFile(t, "set_sysvar_disabled_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "set_sysvar_disabled_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestOne(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithMainAsDefault(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
},
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithSecondUserAsDefault(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "second_user",
Sharded: true,
},
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithUserAsDefault(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "user",
Sharded: true,
},
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestOneWithTPCHVSchema(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "tpch_schema_test.json", true),
+ v: loadSchema(t, "vschemas/tpch_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "onecase.txt", "", vschema, false)
+ testFile(t, "onecase.json", "", vschema, false)
}
func TestRubyOnRailsQueries(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "rails_schema_test.json", true),
+ v: loadSchema(t, "vschemas/rails_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "rails_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "rails_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestOLTP(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "oltp_schema_test.json", true),
+ v: loadSchema(t, "vschemas/oltp_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "oltp_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "oltp_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestTPCC(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "tpcc_schema_test.json", true),
+ v: loadSchema(t, "vschemas/tpcc_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "tpcc_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "tpcc_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func TestTPCH(t *testing.T) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(t, "tpch_schema_test.json", true),
+ v: loadSchema(t, "vschemas/tpch_schema.json", true),
sysVarEnabled: true,
}
- testFile(t, "tpch_cases.txt", makeTestOutput(t), vschemaWrapper, false)
+ testFile(t, "tpch_cases.json", makeTestOutput(t), vschemaWrapper, false)
}
func BenchmarkOLTP(b *testing.B) {
@@ -376,14 +378,11 @@ func BenchmarkTPCH(b *testing.B) {
func benchmarkWorkload(b *testing.B, name string) {
vschemaWrapper := &vschemaWrapper{
- v: loadSchema(b, name+"_schema_test.json", true),
+ v: loadSchema(b, "vschemas/"+name+"_schema.json", true),
sysVarEnabled: true,
}
- var testCases []testCase
- for tc := range iterateExecFile(name + "_cases.txt") {
- testCases = append(testCases, tc)
- }
+ testCases := readJSONTests(name + "_cases.json")
b.ResetTimer()
for _, version := range plannerVersions {
b.Run(version.String(), func(b *testing.B) {
@@ -394,7 +393,7 @@ func benchmarkWorkload(b *testing.B, name string) {
func TestBypassPlanningShardTargetFromFile(t *testing.T) {
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -402,13 +401,13 @@ func TestBypassPlanningShardTargetFromFile(t *testing.T) {
tabletType: topodatapb.TabletType_PRIMARY,
dest: key.DestinationShard("-80")}
- testFile(t, "bypass_shard_cases.txt", makeTestOutput(t), vschema, false)
+ testFile(t, "bypass_shard_cases.json", makeTestOutput(t), vschema, false)
}
func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) {
keyRange, _ := key.ParseShardingSpec("-")
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -417,13 +416,13 @@ func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) {
dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]},
}
- testFile(t, "bypass_keyrange_cases.txt", makeTestOutput(t), vschema, false)
+ testFile(t, "bypass_keyrange_cases.json", makeTestOutput(t), vschema, false)
}
func TestWithDefaultKeyspaceFromFile(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -432,18 +431,18 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "alterVschema_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "ddl_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "migration_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "flush_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "show_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "call_cases.txt", testOutputTempDir, vschema, false)
+ testFile(t, "alterVschema_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "ddl_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "migration_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "flush_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "show_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "call_cases.json", testOutputTempDir, vschema, false)
}
func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "second_user",
Sharded: true,
@@ -452,13 +451,13 @@ func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "select_cases_with_default.txt", testOutputTempDir, vschema, false)
+ testFile(t, "select_cases_with_default.json", testOutputTempDir, vschema, false)
}
func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "user",
Sharded: true,
@@ -467,24 +466,24 @@ func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "select_cases_with_user_as_default.txt", testOutputTempDir, vschema, false)
+ testFile(t, "select_cases_with_user_as_default.json", testOutputTempDir, vschema, false)
}
func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{Name: "information_schema"},
tabletType: topodatapb.TabletType_PRIMARY,
}
- testFile(t, "sysschema_default.txt", makeTestOutput(t), vschema, false)
+ testFile(t, "sysschema_default.json", makeTestOutput(t), vschema, false)
}
func TestOtherPlanningFromFile(t *testing.T) {
// We are testing this separately so we can set a default keyspace
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
keyspace: &vindexes.Keyspace{
Name: "main",
Sharded: false,
@@ -493,8 +492,8 @@ func TestOtherPlanningFromFile(t *testing.T) {
}
testOutputTempDir := makeTestOutput(t)
- testFile(t, "other_read_cases.txt", testOutputTempDir, vschema, false)
- testFile(t, "other_admin_cases.txt", testOutputTempDir, vschema, false)
+ testFile(t, "other_read_cases.json", testOutputTempDir, vschema, false)
+ testFile(t, "other_admin_cases.json", testOutputTempDir, vschema, false)
}
func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSchema {
@@ -720,14 +719,48 @@ func (vw *vschemaWrapper) FindRoutedShard(keyspace, shard string) (string, error
return "", nil
}
+type (
+ planTest struct {
+ Comment string `json:"comment,omitempty"`
+ Query string `json:"query,omitempty"`
+ Plan json.RawMessage `json:"plan,omitempty"`
+ V3Plan json.RawMessage `json:"v3-plan,omitempty"`
+ Gen4Plan json.RawMessage `json:"gen4-plan,omitempty"`
+ }
+)
+
+func compacted(in string) string {
+ if in != "" && in[0] != '{' {
+ return in
+ }
+ dst := bytes.NewBuffer(nil)
+ err := json.Compact(dst, []byte(in))
+ if err != nil {
+ panic(err)
+ }
+ return dst.String()
+}
+
func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, render bool) {
t.Run(filename, func(t *testing.T) {
- expected := &strings.Builder{}
+ var expected []planTest
var outFirstPlanner string
- for tcase := range iterateExecFile(filename) {
- t.Run(fmt.Sprintf("%d V3: %s", tcase.lineno, tcase.comments), func(t *testing.T) {
+ for _, tcase := range readJSONTests(filename) {
+ if tcase.V3Plan == nil {
+ tcase.V3Plan = tcase.Plan
+ tcase.Gen4Plan = tcase.Plan
+ }
+ current := planTest{}
+ testName := tcase.Comment
+ if testName == "" {
+ testName = tcase.Query
+ }
+ if tcase.Query == "" {
+ continue
+ }
+ t.Run(fmt.Sprintf("V3: %s", testName), func(t *testing.T) {
vschema.version = V3
- plan, err := TestBuilder(tcase.input, vschema, vschema.currentDb())
+ plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb())
if render && plan != nil {
viz, err := engine.GraphViz(plan.Instructions)
if err == nil {
@@ -736,24 +769,20 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, r
}
out := getPlanOrErrorOutput(err, plan)
- if out != tcase.output {
- t.Errorf("V3 - %s:%d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output, out), tcase.output, out)
- }
- if err != nil {
- out = `"` + out + `"`
+ lft := compacted(out)
+ rgt := compacted(string(tcase.V3Plan))
+ if lft != rgt {
+ t.Errorf("V3 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, cmp.Diff(tcase.V3Plan, out), tcase.V3Plan, out)
}
- outFirstPlanner = out
- expected.WriteString(tcase.comments)
- encoder := json.NewEncoder(expected)
- encoder.Encode(tcase.input)
- expected.WriteString(fmt.Sprintf("%s\n", out))
+ outFirstPlanner = out
+ current.Comment = testName
+ current.Query = tcase.Query
})
vschema.version = Gen4
out, err := getPlanOutput(tcase, vschema, render)
- if err != nil && tcase.output2ndPlanner == "" && strings.HasPrefix(err.Error(), "gen4 does not yet support") {
- expected.WriteString("\n")
+ if err != nil && len(tcase.Gen4Plan) == 0 && strings.HasPrefix(err.Error(), "gen4 does not yet support") {
continue
}
@@ -764,43 +793,57 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, r
// this is shown by not having any info at all after the result for the V3 planner
// with this last expectation, it is an error if the Gen4 planner
// produces the same plan as the V3 planner does
- testName := fmt.Sprintf("%d Gen4: %s", tcase.lineno, tcase.comments)
- t.Run(testName, func(t *testing.T) {
- if out != tcase.output2ndPlanner {
- t.Errorf("Gen4 - %s:%d\nDiff:\n%s\n[%s] \n[%s]", filename, tcase.lineno, cmp.Diff(tcase.output2ndPlanner, out), tcase.output2ndPlanner, out)
- }
- if err != nil {
- out = `"` + out + `"`
+ t.Run(fmt.Sprintf("Gen4: %s", testName), func(t *testing.T) {
+ if compacted(out) != compacted(string(tcase.Gen4Plan)) {
+ t.Errorf("Gen4 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, cmp.Diff(tcase.Gen4Plan, out), tcase.Gen4Plan, out)
}
if outFirstPlanner == out {
- expected.WriteString(samePlanMarker)
+ current.Plan = []byte(out)
} else {
- if err != nil {
- out = out[1 : len(out)-1] // remove the double quotes
- expected.WriteString(fmt.Sprintf("Gen4 error: %s\n", out))
- } else {
- expected.WriteString(fmt.Sprintf("%s\n", out))
- }
+ current.V3Plan = []byte(outFirstPlanner)
+ current.Gen4Plan = []byte(out)
}
})
- expected.WriteString("\n")
+ expected = append(expected, current)
}
-
if tempDir != "" {
- gotFile := fmt.Sprintf("%s/%s", tempDir, filename)
- _ = os.WriteFile(gotFile, []byte(strings.TrimSpace(expected.String())+"\n"), 0644)
+ name := strings.TrimSuffix(filename, filepath.Ext(filename))
+ name = filepath.Join(tempDir, name+".json")
+ file, err := os.Create(name)
+ require.NoError(t, err)
+ enc := json.NewEncoder(file)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", " ")
+ err = enc.Encode(expected)
+ if err != nil {
+ require.NoError(t, err)
+ }
}
})
}
-func getPlanOutput(tcase testCase, vschema *vschemaWrapper, render bool) (out string, err error) {
+func readJSONTests(filename string) []planTest {
+ var output []planTest
+ file, err := os.Open(locateFile(filename))
+ if err != nil {
+ panic(err)
+ }
+ dec := json.NewDecoder(file)
+ err = dec.Decode(&output)
+ if err != nil {
+ panic(err)
+ }
+ return output
+}
+
+func getPlanOutput(tcase planTest, vschema *vschemaWrapper, render bool) (out string, err error) {
defer func() {
if r := recover(); r != nil {
out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack()))
}
}()
- plan, err := TestBuilder(tcase.input, vschema, vschema.currentDb())
+ plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb())
if render && plan != nil {
viz, err := engine.GraphViz(plan.Instructions)
if err == nil {
@@ -813,135 +856,32 @@ func getPlanOutput(tcase testCase, vschema *vschemaWrapper, render bool) (out st
func getPlanOrErrorOutput(err error, plan *engine.Plan) string {
if err != nil {
- return err.Error()
+ return "\"" + err.Error() + "\""
}
- bout, _ := json.MarshalIndent(plan, "", " ")
- return string(bout)
-}
-
-type testCase struct {
- file string
- lineno int
- input string
- output string
- output2ndPlanner string
- comments string
-}
-
-func iterateExecFile(name string) (testCaseIterator chan testCase) {
- name = locateFile(name)
- fd, err := os.OpenFile(name, os.O_RDONLY, 0)
+ b := new(bytes.Buffer)
+ enc := json.NewEncoder(b)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", " ")
+ err = enc.Encode(plan)
if err != nil {
- panic(fmt.Sprintf("Could not open file %s", name))
+ panic(err)
}
- testCaseIterator = make(chan testCase)
- var comments string
- go func() {
- defer close(testCaseIterator)
-
- r := bufio.NewReader(fd)
- lineno := 0
- for {
- binput, err := r.ReadBytes('\n')
- if err != nil {
- if err != io.EOF {
- panic(fmt.Errorf("error reading file %s: line %d: %s", name, lineno, err.Error()))
- }
- break
- }
- lineno++
- input := string(binput)
- if input == "" || input == "\n" || strings.HasPrefix(input, "Length:") {
- continue
- }
- if input[0] == '#' {
- comments = comments + input
- continue
- }
- err = json.Unmarshal(binput, &input)
- if err != nil {
- panic(fmt.Sprintf("Line: %d, input: %s, error: %v\n", lineno, binput, err))
- }
- input = strings.Trim(input, "\"")
- var output []byte
- for {
- l, err := r.ReadBytes('\n')
- lineno++
- if err != nil {
- panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error()))
- }
- output = append(output, l...)
- if l[0] == '}' {
- output = output[:len(output)-1]
- break
- }
- if l[0] == '"' {
- output = output[1 : len(output)-2]
- break
- }
- }
-
- binput, err = r.ReadBytes('\n')
- lineno++
- var output2Planner []byte
- if err != nil && err != io.EOF {
- panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error()))
- }
- nextLine := string(binput)
- switch {
- case nextLine == samePlanMarker:
- output2Planner = output
- case strings.HasPrefix(nextLine, "{"):
- output2Planner = append(output2Planner, binput...)
- for {
- l, err := r.ReadBytes('\n')
- lineno++
- if err != nil {
- panic(fmt.Sprintf("error reading file %s line# %d: %s", name, lineno, err.Error()))
- }
- output2Planner = append(output2Planner, l...)
- if l[0] == '}' {
- output2Planner = output2Planner[:len(output2Planner)-1]
- break
- }
- if l[0] == '"' {
- output2Planner = output2Planner[1 : len(output2Planner)-2]
- break
- }
- }
- case strings.HasPrefix(nextLine, gen4ErrorPrefix):
- output2Planner = []byte(nextLine[len(gen4ErrorPrefix) : len(nextLine)-1])
- }
- testCaseIterator <- testCase{
- file: name,
- lineno: lineno,
- input: input,
- output: string(output),
- output2ndPlanner: string(output2Planner),
- comments: comments,
- }
- comments = ""
- }
- }()
- return testCaseIterator
+ return b.String()
}
func locateFile(name string) string {
return "testdata/" + name
}
-var benchMarkFiles = []string{"from_cases.txt", "filter_cases.txt", "large_cases.txt", "aggr_cases.txt", "select_cases.txt", "union_cases.txt"}
+var benchMarkFiles = []string{"from_cases.json", "filter_cases.json", "large_cases.json", "aggr_cases.json", "select_cases.json", "union_cases.json"}
func BenchmarkPlanner(b *testing.B) {
vschema := &vschemaWrapper{
- v: loadSchema(b, "schema_test.json", true),
+ v: loadSchema(b, "vschemas/schema.json", true),
sysVarEnabled: true,
}
for _, filename := range benchMarkFiles {
- var testCases []testCase
- for tc := range iterateExecFile(filename) {
- testCases = append(testCases, tc)
- }
+ testCases := readJSONTests(filename)
b.Run(filename+"-v3", func(b *testing.B) {
benchmarkPlanner(b, V3, testCases, vschema)
})
@@ -956,14 +896,14 @@ func BenchmarkPlanner(b *testing.B) {
func BenchmarkSemAnalysis(b *testing.B) {
vschema := &vschemaWrapper{
- v: loadSchema(b, "schema_test.json", true),
+ v: loadSchema(b, "vschemas/schema.json", true),
sysVarEnabled: true,
}
for i := 0; i < b.N; i++ {
for _, filename := range benchMarkFiles {
- for tc := range iterateExecFile(filename) {
- exerciseAnalyzer(tc.input, vschema.currentDb(), vschema)
+ for _, tc := range readJSONTests(filename) {
+ exerciseAnalyzer(tc.Query, vschema.currentDb(), vschema)
}
}
}
@@ -989,23 +929,13 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) {
func BenchmarkSelectVsDML(b *testing.B) {
vschema := &vschemaWrapper{
- v: loadSchema(b, "schema_test.json", true),
+ v: loadSchema(b, "vschemas/schema.json", true),
sysVarEnabled: true,
version: V3,
}
- var dmlCases []testCase
- var selectCases []testCase
-
- for tc := range iterateExecFile("dml_cases.txt") {
- dmlCases = append(dmlCases, tc)
- }
-
- for tc := range iterateExecFile("select_cases.txt") {
- if tc.output2ndPlanner != "" {
- selectCases = append(selectCases, tc)
- }
- }
+ dmlCases := readJSONTests("dml_cases.json")
+ selectCases := readJSONTests("select_cases.json")
rand.Shuffle(len(dmlCases), func(i, j int) {
dmlCases[i], dmlCases[j] = dmlCases[j], dmlCases[i]
@@ -1024,13 +954,13 @@ func BenchmarkSelectVsDML(b *testing.B) {
})
}
-func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []testCase, vschema *vschemaWrapper) {
+func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemaWrapper) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
for _, tcase := range testCases {
- if tcase.output2ndPlanner != "" {
+ if len(tcase.Gen4Plan) > 0 {
vschema.version = version
- _, _ = TestBuilder(tcase.input, vschema, vschema.currentDb())
+ _, _ = TestBuilder(tcase.Query, vschema, vschema.currentDb())
}
}
}
diff --git a/go/vt/vtgate/planbuilder/projection_pushing.go b/go/vt/vtgate/planbuilder/projection_pushing.go
index c48b3b57f8a..78898d9bd99 100644
--- a/go/vt/vtgate/planbuilder/projection_pushing.go
+++ b/go/vt/vtgate/planbuilder/projection_pushing.go
@@ -76,7 +76,7 @@ func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlpa
if err != nil {
return 0, false, err
}
- if added && ctx.SemTable.DirectDeps(expr.Expr).NumberOfTables() > 0 {
+ if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() {
return 0, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr))
}
if added {
@@ -322,7 +322,7 @@ func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *routeGen4, expr
func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error {
ti, err := semTable.TableInfoForExpr(expr.Expr)
- if err != nil && err != semantics.ErrMultipleTables {
+ if err != nil && err != semantics.ErrNotSingleTable {
return err
}
_, isDerivedTable := ti.(*semantics.DerivedTable)
diff --git a/go/vt/vtgate/planbuilder/rewrite.go b/go/vt/vtgate/planbuilder/rewrite.go
index d1b6a789a1b..4ff3edd9fd3 100644
--- a/go/vt/vtgate/planbuilder/rewrite.go
+++ b/go/vt/vtgate/planbuilder/rewrite.go
@@ -174,33 +174,40 @@ func rewriteHavingClause(node *sqlparser.Select) {
exprs := sqlparser.SplitAndExpression(nil, node.Having.Expr)
node.Having = nil
for _, expr := range exprs {
- var hasAggr bool
- sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
- switch x := cursor.Node().(type) {
- case *sqlparser.ColName:
- if !x.Qualifier.IsEmpty() {
- return false
- }
- originalExpr, isInMap := selectExprMap[x.Name.Lowered()]
- if isInMap {
- if sqlparser.ContainsAggregation(originalExpr) {
+ hasAggr := sqlparser.ContainsAggregation(expr)
+ if !hasAggr {
+ sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
+ visitColName(cursor.Node(), selectExprMap, func(original sqlparser.Expr) {
+ if sqlparser.ContainsAggregation(original) {
hasAggr = true
- } else {
- cursor.Replace(originalExpr)
}
- }
- return false
- default:
- _, isAggregate := x.(sqlparser.AggrFunc)
- hasAggr = hasAggr || isAggregate
- }
- return true
- }, nil)
-
+ })
+ return true
+ }, nil)
+ }
if hasAggr {
node.AddHaving(expr)
} else {
+ sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool {
+ visitColName(cursor.Node(), selectExprMap, func(original sqlparser.Expr) {
+ cursor.Replace(original)
+ })
+ return true
+ }, nil)
node.AddWhere(expr)
}
}
}
+func visitColName(cursor sqlparser.SQLNode, selectExprMap map[string]sqlparser.Expr, f func(original sqlparser.Expr)) {
+ switch x := cursor.(type) {
+ case *sqlparser.ColName:
+ if !x.Qualifier.IsEmpty() {
+ return
+ }
+ originalExpr, isInMap := selectExprMap[x.Name.Lowered()]
+ if isInMap {
+ f(originalExpr)
+ }
+ return
+ }
+}
diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go
index b72af989f3b..774b81bcb18 100644
--- a/go/vt/vtgate/planbuilder/simplifier_test.go
+++ b/go/vt/vtgate/planbuilder/simplifier_test.go
@@ -38,7 +38,7 @@ import (
func TestSimplifyBuggyQuery(t *testing.T) {
query := "(select id from unsharded union select id from unsharded_auto) union (select id from user union select name from unsharded)"
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
version: Gen4,
}
stmt, reserved, err := sqlparser.Parse2(query)
@@ -60,7 +60,7 @@ func TestSimplifyPanic(t *testing.T) {
t.Skip("not needed to run")
query := "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
version: Gen4,
}
stmt, reserved, err := sqlparser.Parse2(query)
@@ -81,14 +81,14 @@ func TestSimplifyPanic(t *testing.T) {
func TestUnsupportedFile(t *testing.T) {
t.Skip("run manually to see if any queries can be simplified")
vschema := &vschemaWrapper{
- v: loadSchema(t, "schema_test.json", true),
+ v: loadSchema(t, "vschemas/schema.json", true),
version: Gen4,
}
fmt.Println(vschema)
- for tcase := range iterateExecFile("unsupported_cases.txt") {
- t.Run(fmt.Sprintf("%d:%s", tcase.lineno, tcase.input), func(t *testing.T) {
- log.Errorf("%s:%d - %s", tcase.file, tcase.lineno, tcase.input)
- stmt, reserved, err := sqlparser.Parse2(tcase.input)
+ for _, tcase := range readJSONTests("unsupported_cases.json") {
+ t.Run(tcase.Query, func(t *testing.T) {
+ log.Errorf("unsupported_cases.json - %s", tcase.Query)
+ stmt, reserved, err := sqlparser.Parse2(tcase.Query)
require.NoError(t, err)
_, ok := stmt.(sqlparser.SelectStatement)
if !ok {
@@ -104,12 +104,12 @@ func TestUnsupportedFile(t *testing.T) {
reservedVars := sqlparser.NewReservedVars("vtg", reserved)
ast := rewritten.AST
origQuery := sqlparser.String(ast)
- stmt, _, _ = sqlparser.Parse2(tcase.input)
+ stmt, _, _ = sqlparser.Parse2(tcase.Query)
simplified := simplifier.SimplifyStatement(
stmt.(sqlparser.SelectStatement),
vschema.currentDb(),
vschema,
- keepSameError(tcase.input, reservedVars, vschema, rewritten.BindVarNeeds),
+ keepSameError(tcase.Query, reservedVars, vschema, rewritten.BindVarNeeds),
)
if simplified == nil {
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
new file mode 100644
index 00000000000..ce4f4b00fc7
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
@@ -0,0 +1,5075 @@
+[
+ {
+ "comment": "group by a unique vindex should revert to simple route, and having clause should find the correct symbols.",
+ "query": "select id, count(*) c from user group by id having max(col) > 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) c from user group by id having max(col) > 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
+ "Query": "select id, count(*) as c from `user` group by id having max(col) > 10",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) c from user group by id having max(col) > 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
+ "Query": "select id, count(*) as c from `user` group by id having max(col) > 10",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate in a subquery",
+ "query": "select a from (select count(*) as a from user) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with non-aggregate expressions.",
+ "query": "select id, count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(1) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate using distinct",
+ "query": "select distinct col from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select distinct col from `user` order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select distinct col from `user` order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by select col",
+ "query": "select col from user group by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct group by unique vindex",
+ "query": "select id, count(distinct col) from user group by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(distinct col) from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
+ "Query": "select id, count(distinct col) from `user` group by id",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(distinct col) from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
+ "Query": "select id, count(distinct col) from `user` group by id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct unique vindex",
+ "query": "select col, count(distinct id) from user group by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct id) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(distinct id) from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct id) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_distinct(1) AS count(distinct id)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(distinct id) from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct no unique vindex",
+ "query": "select col1, count(distinct col2) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS count(distinct col2)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|3) AS count(distinct col2)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct no unique vindex and no group by",
+ "query": "select count(distinct col2) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct col2) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_distinct_count(0) AS count(distinct col2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct col2) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_distinct(0|1) AS count(distinct col2)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count with distinct no unique vindex, count expression aliased",
+ "query": "select col1, count(distinct col2) c2 from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) c2 from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS c2",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) c2 from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|3) AS c2",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sum with distinct no unique vindex",
+ "query": "select col1, sum(distinct col2) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, sum(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_distinct_sum(1) AS sum(distinct col2)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, sum(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_distinct(1|3) AS sum(distinct col2)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "min with distinct no unique vindex. distinct is ignored.",
+ "query": "select col1, min(distinct col2) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1|3) AS min(distinct col2)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by count distinct",
+ "query": "select col1, count(distinct col2) k from user group by col1 order by k",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS k",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|3) AS k",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by aggregate function",
+ "query": "select count(*) b from user group by b",
+ "v3-plan": "Can't group on 'b'",
+ "gen4-plan": "Can't group on 'count(*)'"
+ },
+ {
+ "comment": "scatter aggregate multiple group by (columns)",
+ "query": "select a, b, count(*) from user group by b, a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
+ "OrderBy": "(1|3) ASC, (0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3), (1|4)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate multiple group by (numbers)",
+ "query": "select a, b, count(*) from user group by 2, 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by 2, 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by 2, 1, weight_string(b), weight_string(a)",
+ "OrderBy": "(1|3) ASC, (0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1, weight_string(b), weight_string(a) order by b asc, a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by 2, 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3), (1|4)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate multiple group by columns inverse order",
+ "query": "select a, b, count(*) from user group by b, a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
+ "OrderBy": "(1|3) ASC, (0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by b, a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3), (1|4)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by column number",
+ "query": "select col from user group by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by 1 order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user group by 1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate group by invalid column number",
+ "query": "select col from user group by 2",
+ "plan": "Unknown column '2' in 'group statement'"
+ },
+ {
+ "comment": "scatter aggregate order by null",
+ "query": "select count(*) from user order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with numbered order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "0, 1, 2",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by 1 asc, 2 asc, 3 asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
+ "GroupBy": "(0|5), (1|6), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with named order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "0, 1, 2",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
+ "GroupBy": "(0|5), (1|6), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
+ "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with jumbled order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "0, 1, 2, 3",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
+ "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(4) AS count(*)",
+ "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
+ "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with jumbled group by and order by columns",
+ "query": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(4) AS count",
+ "GroupBy": "2, 1, 0, 3",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
+ "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(4) AS count(*)",
+ "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
+ "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
+ "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with some descending order by cols",
+ "query": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(3) AS count",
+ "GroupBy": "2, 1, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b)",
+ "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC",
+ "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b) order by 1 desc, 3 desc, b asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(3) AS count(*)",
+ "GroupBy": "(0|4), (2|6), (1|5)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
+ "OrderBy": "(0|4) DESC, (2|6) DESC, (1|5) ASC",
+ "Query": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a desc, c desc, b asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "invalid order by column number for scatter",
+ "query": "select col, count(*) from user group by col order by 5 limit 10",
+ "plan": "Unknown column '5' in 'order clause'"
+ },
+ {
+ "comment": "aggregate with limit",
+ "query": "select col, count(*) from user group by col limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from user group by col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from user group by col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group by with collate operator",
+ "query": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
+ "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
+ "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for aggregates",
+ "query": "select id, count(*) from route2 group by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from route2 group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
+ "Query": "select id, count(*) from unsharded as route2 group by id",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from route2 group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
+ "Query": "select id, count(*) from unsharded as route2 group by id",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "order by on a reference table",
+ "query": "select col from ref order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref order by col asc",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref order by col asc",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "distinct and aggregate functions missing group by",
+ "query": "select distinct a, count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select a, count(*), weight_string(a) from `user` order by a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0, 1",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS a, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*) from `user` where 1 != 1",
+ "Query": "select a, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "distinct and aggregate functions",
+ "query": "select distinct a, count(*) from user group by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0, 0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc, a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), 1",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group by invalid column number (code is duplicated from symtab).",
+ "query": "select id from user group by 1.1",
+ "v3-plan": "column number is not an int",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user group by 1.1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(0) AS id",
+ "GroupBy": "1",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, 1.1 from `user` where 1 != 1 group by 1.1",
+ "OrderBy": "1 ASC",
+ "Query": "select id, 1.1 from `user` group by 1.1 order by 1.1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group by out of range column number (code is duplicated from symtab).",
+ "query": "select id from user group by 2",
+ "plan": "Unknown column '2' in 'group statement'"
+ },
+ {
+ "comment": "here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)",
+ "query": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
+ "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
+ "Table": "`user`, user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "order by inside derived tables can be ignored",
+ "query": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id order by user_extra.extra asc",
+ "ResultColumns": 2,
+ "Table": "`user`, user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
+ "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "here we keep the order since the column is visible on the outside, and used by the orderedAggregate",
+ "query": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a group by col order by col asc",
+ "Table": "`user`, user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "optimize group by when using distinct with no aggregation",
+ "query": "select distinct col1, col2 from user group by col1, col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col1, col2 from user group by col1, col2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0, 1, 0, 1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
+ "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC, (1|3) ASC",
+ "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc, col1 asc, col2 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct col1, col2 from user group by col1, col2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), (1|3)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2",
+ "OrderBy": "(0|2) ASC, (1|3) ASC",
+ "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "do not use distinct when using only aggregates and no group by",
+ "query": "select distinct count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct count(*) from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Grouping on join",
+ "query": "select user.a from user join user_extra group by user.a",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a from user join user_extra group by user.a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Cannot have more than one aggr(distinct...",
+ "query": "select count(distinct a), count(distinct b) from user",
+ "plan": "unsupported: only one distinct aggregation allowed in a select: count(distinct b)"
+ },
+ {
+ "comment": "multiple distinct functions with grouping.",
+ "query": "select col1, count(distinct col2), sum(distinct col2) from user group by col1",
+ "v3-plan": "unsupported: only one distinct aggregation allowed in a select: sum(distinct col2)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(distinct col2), sum(distinct col2) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|4) AS count(distinct col2), sum_distinct(2|4) AS sum(distinct col2)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregate query with order by aggregate column along with NULL",
+ "query": "select col, count(*) k from user group by col order by null, k",
+ "v3-plan": "unsupported: in scatter query: complex order by expression: null",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) k from user group by col order by null, k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS k",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as k from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregate query with order by NULL",
+ "query": "select col, count(*) k from user group by col order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) k from user group by col order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as k from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) k from user group by col order by null",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS k",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as k from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join query on sharding key with group by a unique vindex with having clause.",
+ "query": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, count(*) as c from `user`, user_extra where 1 != 1 group by `user`.id",
+ "Query": "select `user`.id, count(*) as c from `user`, user_extra where `user`.id = user_extra.user_id group by `user`.id having max(`user`.col) > 10",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery on sharding key with group by a unique vindex with having clause.",
+ "query": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation filtering by having on a route",
+ "query": "select id from user group by id having count(id) = 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user group by id having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 group by id",
+ "Query": "select id from `user` group by id having count(id) = 10",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user group by id having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 group by id",
+ "Query": "select id from `user` group by id having count(id) = 10",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "weight_string addition to group by",
+ "query": "select lower(textcol1) as v, count(*) from user group by v",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select lower(textcol1) as v, count(*) from user group by v",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select lower(textcol1) as v, count(*) from user group by v",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "weight_string addition to group by when also there in order by",
+ "query": "select char_length(texcol1) as a, count(*) from user group by a order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by inside and outside parenthesis select",
+ "query": "(select id from user order by 1 desc) order by 1 asc limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by 1 asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery in exists clause with an ordering",
+ "query": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "ProjectedIndexes": "-2,-1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, col, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select `user`.id, col, weight_string(id) from `user` order by id asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Column and Literal equality filter on scatter aggregates",
+ "query": "select count(*) a from user having a = 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a = 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Equality filtering with column and string literal on scatter aggregates",
+ "query": "select count(*) a from user having a = '1'",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a = '1'",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = '1'",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column and Literal not equal filter on scatter aggregates",
+ "query": "select count(*) a from user having a != 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a != 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 != 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Not equal filter with column and string literal on scatter aggregates",
+ "query": "select count(*) a from user having a != '1'",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a != '1'",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 != '1'",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Greater than filter on scatter aggregates",
+ "query": "select count(*) a from user having a > 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a > 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 > 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Greater Equal filter on scatter aggregates",
+ "query": "select count(*) a from user having a >= 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a >= 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 >= 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Less than filter on scatter aggregates",
+ "query": "select count(*) a from user having a < 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a < 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 < 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Less Equal filter on scatter aggregates",
+ "query": "select count(*) a from user having a <= 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a <= 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 <= 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Less Equal filter on scatter with grouping",
+ "query": "select col, count(*) a from user group by col having a <= 10",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) a from user group by col having a <= 10",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":1 <= 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS a",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*) as a from `user` where 1 != 1 group by col",
+ "OrderBy": "0 ASC",
+ "Query": "select col, count(*) as a from `user` group by col order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "We should be able to find grouping keys on ordered aggregates",
+ "query": "select count(*) as a, val1 from user group by val1 having a = 1.00",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = 1.00",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(0) AS a",
+ "GroupBy": "(1|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "distinct on text column with collation",
+ "query": "select col, count(distinct textcol1) from user group by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct textcol1) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS count(distinct textcol1)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, textcol1, weight_string(textcol1) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol1)",
+ "OrderBy": "0 ASC, (1|2) ASC",
+ "Query": "select col, textcol1, weight_string(textcol1) from `user` group by col, textcol1, weight_string(textcol1) order by col asc, textcol1 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(distinct textcol1) from user group by col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1 COLLATE latin1_swedish_ci) AS count(distinct textcol1)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, textcol1 from `user` where 1 != 1 group by col, textcol1",
+ "OrderBy": "0 ASC, 1 ASC COLLATE latin1_swedish_ci",
+ "Query": "select col, textcol1 from `user` group by col, textcol1 order by col asc, textcol1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation filtering by having on a route with no group by with non-unique vindex filter",
+ "query": "select 1 from user having count(id) = 10 and name = 'a'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10 and name = 'a'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` having count(id) = 10 and `name` = 'a'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"a\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10 and name = 'a'",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"a\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
+ "Query": "select 1, count(id) from `user` where `name` = 'a'",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregates and joins",
+ "query": "select count(*) from user join user_extra",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation filtering by having on a route with no group by",
+ "query": "select 1 from user having count(id) = 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` having count(id) = 10",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user having count(id) = 10",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
+ "Query": "select 1, count(id) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregate on join",
+ "query": "select user.a, count(*) from user join user_extra group by user.a",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a, count(*) from user join user_extra group by user.a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregate on other table in join",
+ "query": "select user.a, count(user_extra.a) from user join user_extra group by user.a",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a, count(user_extra.a) from user join user_extra group by user.a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count(user_extra.a)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(user_extra.a)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(user_extra.a) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(user_extra.a) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation spread out across three routes",
+ "query": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count(u.textcol1), sum_count(1) AS count(ue.foo)",
+ "GroupBy": "(2|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1)) as count(u.textcol1)",
+ "([COLUMN 5] * COALESCE([COLUMN 6], INT64(1))) * COALESCE([COLUMN 7], INT64(1)) as count(ue.foo)",
+ "[COLUMN 0] as bar",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:1,R:2,R:3,L:2,R:4,R:5",
+ "JoinVars": {
+ "u_foo": 0
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u where 1 != 1 group by u.foo, weight_string(u.foo)",
+ "Query": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u group by u.foo, weight_string(u.foo)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
+ "JoinVars": {
+ "ue_bar": 0
+ },
+ "TableName": "user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where 1 != 1 group by ue.bar, weight_string(ue.bar)",
+ "Query": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where ue.bar = :u_foo group by ue.bar, weight_string(ue.bar)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where 1 != 1 group by us.bar, weight_string(us.bar)",
+ "Query": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where us.baz = :ue_bar group by us.bar, weight_string(us.bar)",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "using two distinct columns - min with distinct vindex, sum with distinct without vindex",
+ "query": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1), sum_distinct_sum(2) AS sum(distinct col3)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(col3)",
+ "OrderBy": "(0|3) ASC, (2|4) ASC",
+ "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(col3) order by col1 asc, col3 asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1) AS min(distinct id), sum_distinct(2|4) AS sum(distinct col3)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, weight_string(col1), col3, weight_string(col3)",
+ "OrderBy": "(0|3) ASC, (2|4) ASC",
+ "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, weight_string(col1), col3, weight_string(col3) order by col1 asc, col3 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation on top of semijoin",
+ "query": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1] as count(*)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_apa": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` where 1 != 1 group by `user`.apa, weight_string(`user`.apa)",
+ "Query": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` group by `user`.apa, weight_string(`user`.apa)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.bar = :user_apa",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "we have to track the order of distinct aggregation expressions",
+ "query": "select val2, count(distinct val1), count(*) from user group by val2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select val2, count(distinct val1), count(*) from user group by val2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct_count(1) AS count(distinct val1), sum_count(2) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select val2, count(distinct val1), count(*) from user group by val2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|4) AS count(distinct val1), sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, weight_string(val2), val1, weight_string(val1)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, weight_string(val2), val1, weight_string(val1) order by val2 asc, val1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "group by column alias",
+ "query": "select ascii(val1) as a, count(*) from user group by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ascii(val1) as a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ascii(val1) as a, count(*) from user group by a",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple distinct aggregations on the same column is allowed",
+ "query": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1",
+ "v3-plan": "unsupported: only one distinct aggregation allowed in a select: sum(distinct tcol2)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|4) AS count(distinct tcol2), sum_distinct(2|4) AS sum(distinct tcol2)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2)",
+ "OrderBy": "(0|3) ASC, (1|4) ASC",
+ "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2) order by tcol1 asc, tcol2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple distinct aggregations on the same column in different positions",
+ "query": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1",
+ "v3-plan": "unsupported: only one distinct aggregation allowed in a select: sum(distinct tcol2)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(0|4) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|4) AS sum(distinct tcol2)",
+ "GroupBy": "(1|5)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` where 1 != 1 group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1)",
+ "OrderBy": "(1|5) ASC, (0|4) ASC",
+ "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1) order by tcol1 asc, tcol2 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+            "comment": "distinct aggregation with a 3 table join query",
+ "query": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_distinct(1|2) AS count(distinct u.val2)",
+ "GroupBy": "0 COLLATE latin1_swedish_ci",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as textcol1",
+ "[COLUMN 1] as val2",
+ "[COLUMN 2]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:2,L:3,L:5",
+ "JoinVars": {
+ "u2_val2": 0
+ },
+ "TableName": "`user`_`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,L:2,L:0,R:1,L:1",
+ "JoinVars": {
+ "u_val2": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u where 1 != 1 group by u.val2, weight_string(u.val2), u.textcol1",
+ "OrderBy": "2 ASC COLLATE latin1_swedish_ci, (0|1) ASC",
+ "Query": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u group by u.val2, weight_string(u.val2), u.textcol1 order by u.textcol1 asc, u.val2 asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u2.val2, weight_string(u2.val2) from `user` as u2 where 1 != 1 group by u2.val2, weight_string(u2.val2)",
+ "Query": "select u2.val2, weight_string(u2.val2) from `user` as u2 where u2.id = :u_val2 group by u2.val2, weight_string(u2.val2)",
+ "Table": "`user`",
+ "Values": [
+ ":u_val2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.id = :u2_val2",
+ "Table": "music",
+ "Values": [
+ ":u2_val2"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "interleaving grouping, aggregation and join",
+ "query": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "min(1) AS min(user_extra.foo), max(3) AS max(user_extra.bar)",
+ "GroupBy": "0, (2|4)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as col",
+ "[COLUMN 3] as min(user_extra.foo)",
+ "[COLUMN 1] as bar",
+ "[COLUMN 4] as max(user_extra.bar)",
+ "[COLUMN 2]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,R:1,R:2",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1 group by `user`.col, `user`.bar, weight_string(`user`.bar)",
+ "OrderBy": "0 ASC, (1|2) ASC",
+ "Query": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` group by `user`.col, `user`.bar, weight_string(`user`.bar) order by `user`.col asc, `user`.bar asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where user_extra.bar = :user_col group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "group_concat on single shards",
+ "query": "select group_concat(user_id order by name), id from user group by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select group_concat(user_id order by name), id from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
+ "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select group_concat(user_id order by name), id from user group by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
+ "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "query": "select count(distinct user_id, name) from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct user_id, name) from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
+ "Query": "select count(distinct user_id, `name`) from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(distinct user_id, name) from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
+ "Query": "select count(distinct user_id, `name`) from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "query": "select count(distinct user_id, name) from user",
+ "v3-plan": "unsupported: only one expression allowed inside aggregates: count(distinct user_id, `name`)",
+ "gen4-plan": "aggregate functions take a single argument 'count(distinct user_id, `name`)'"
+ },
+ {
+ "query": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS sum(col)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as sum(col)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,R:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col as col, 32, sum(col) from `user` where 1 != 1",
+ "Query": "select `user`.col as col, 32, sum(col) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "find aggregation expression and use column offset in filter",
+ "query": "select foo, count(*) from user group by foo having count(*) = 3",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo, count(*) from user group by foo having count(*) = 3",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 3",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "find aggregation expression and use column offset in filter times two",
+ "query": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 + :2 = 42",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "find aggregation expression and use column offset in filter times three",
+ "query": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 + :2 = 42",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "having should be able to add new aggregation expressions in having",
+ "query": "select foo from user group by foo having count(*) = 3",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select foo from user group by foo having count(*) = 3",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 3",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 3",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count(u.`name`)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(u.`name`)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
+ "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
+ "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "query": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id where 1 != 1 group by u.id",
+ "Query": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id group by u.id having count(u.`name`) = 3",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u, user_extra as ue where 1 != 1 group by u.id",
+ "Query": "select u.id from `user` as u, user_extra as ue where ue.user_id = u.id group by u.id having count(u.`name`) = 3",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "only extract the aggregation once, even if used twice",
+ "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 < 3 and :1 > 5",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
+ "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
+ "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "query": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue",
+ "v3-plan": "symbol ue.col not found in subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 > 10",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS 1, sum_count(1) AS count(ue.col)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(ue.col) from `user` as u where 1 != 1",
+ "Query": "select 1, count(ue.col) from `user` as u",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from user_extra as ue where 1 != 1",
+ "Query": "select :__sq1 from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "group by and ',' joins with condition",
+ "query": "select user.col from user join user_extra on user_extra.col = user.col group by user.id",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user_extra.col = user.col group by user.id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(0) AS col",
+ "GroupBy": "(2|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] as col",
+ "[COLUMN 1]",
+ "[COLUMN 0] as id"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.col, `user`.id, weight_string(`user`.id)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` group by `user`.col, `user`.id, weight_string(`user`.id) order by `user`.id asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1 group by 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_col group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate symtab lookup error",
+ "query": "select id, b as id, count(*) from user order by id",
+ "v3-plan": "ambiguous symbol reference: id",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, b as id, count(*) from user order by id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, random(1) AS id, sum_count_star(2) AS count(*)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, b as id, count(*), weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select id, b as id, count(*), weight_string(id) from `user` order by id asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+            "comment": "aggr and non-aggr without group by (this query does not give a useful result)",
+ "query": "select id, count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(1) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "group by and ',' joins",
+ "query": "select user.id from user, user_extra group by id",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user, user_extra group by id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(1|0)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1]",
+ "[COLUMN 0] as id"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(`user`.id) from `user` where 1 != 1 group by id, weight_string(`user`.id)",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(`user`.id) from `user` group by id, weight_string(`user`.id) order by id asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "count on column from LIMIT",
+ "query": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count(0) AS count(city)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] as count(city)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select phone, id, city from `user` where 1 != 1",
+ "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count(*) on column from LIMIT",
+ "query": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as count(*)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select phone, id, city from `user` where 1 != 1",
+ "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "count non-null columns incoming from outer joins should work well",
+ "query": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "count(0) AS count(col)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as count(col)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col as col from user_extra where 1 != 1",
+ "Query": "select user_extra.col as col from user_extra where user_extra.id = :user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "grouping on data from derived table",
+ "query": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1] as val1",
+ "[COLUMN 0] as count(*)",
+ "[COLUMN 2]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, val1, weight_string(val1) from `user` where 1 != 1",
+ "OrderBy": "(1|2) ASC, (1|2) ASC",
+ "Query": "select id, val1, weight_string(val1) from `user` where val2 < 4 order by val1 asc, val1 asc limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Can't inline derived table when it has HAVING with aggregation function",
+ "query": "select * from (select id from user having count(*) = 1) s",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select id from user having count(*) = 1) s",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select id from `user` where 1 != 1) as s where 1 != 1",
+ "Query": "select * from (select id from `user` having count(*) = 1) as s",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select id from user having count(*) = 1) s",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": ":1 = 1",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) from `user` where 1 != 1",
+ "Query": "select id, count(*) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Group By X Order By X",
+ "query": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)",
+ "v3-plan": "unsupported: in scatter query: complex order by expression: count(`user`.intcol)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count(`user`.intcol)",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.intcol, count(`user`.intcol) from `user` where 1 != 1 group by `user`.intcol",
+ "OrderBy": "0 ASC",
+ "Query": "select `user`.intcol, count(`user`.intcol) from `user` group by `user`.intcol order by `user`.intcol asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "AggregateRandom in non full group by query",
+ "query": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS name, sum_count(2) AS count(m.predef1)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as id",
+ "[COLUMN 2] as name",
+ "[COLUMN 3] * COALESCE([COLUMN 4], INT64(1)) as count(m.predef1)",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:2,R:3,R:0,L:1,R:1",
+ "JoinVars": {
+ "m_order": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select m.`order`, count(m.predef1), weight_string(m.`order`) from user_extra as m where 1 != 1 group by m.`order`, weight_string(m.`order`)",
+ "Query": "select m.`order`, count(m.predef1), weight_string(m.`order`) from user_extra as m group by m.`order`, weight_string(m.`order`)",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
+ "Query": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where u.id = :m_order group by u.id, weight_string(u.id)",
+ "Table": "`user`",
+ "Values": [
+ ":m_order"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregation in a left join query",
+ "query": "select count (u.id) from user u left join user_extra ue on u.col = ue.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count (u.id) from user u left join user_extra ue on u.col = ue.col",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count(u.id)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(u.id)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1,R:1",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, count(u.id) from `user` as u where 1 != 1 group by u.col",
+ "Query": "select u.col, count(u.id) from `user` as u group by u.col",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from user_extra as ue where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from user_extra as ue where ue.col = :u_col group by 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Aggregations from derived table used in arithmetic outside derived table",
+ "query": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A",
+ "v3-plan": "unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as a",
+ "[COLUMN 1] as b",
+ "[COLUMN 0] / [COLUMN 1] as d"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS a, sum(1) AS b",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(a) as a, sum(b) as b from `user` where 1 != 1",
+ "Query": "select sum(a) as a, sum(b) as b from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "when pushing predicates into derived tables, make sure to put them in HAVING when they contain aggregations",
+ "query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1",
+ "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where `count` >= :v2",
+ "Table": "user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1",
+ "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where `count` >= :v2",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation, where and derived tables - we can push extremums",
+ "query": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "max(1) AS bazo",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo, max(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select foo, max(baz) as bazo, weight_string(foo) from (select foo, baz from `user` having max(baz) between 100 and 200) as f group by foo, weight_string(foo) order by foo asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt b/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt
deleted file mode 100644
index d1c54cb1b2d..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.txt
+++ /dev/null
@@ -1,5776 +0,0 @@
-# Test cases in this file follow the code in ordered_aggregate.go.
-#
-# Aggregate on unsharded
-"select count(*), col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*), col from unsharded where 1 != 1",
- "Query": "select count(*), col from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*), col from unsharded where 1 != 1",
- "Query": "select count(*), col from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Aggregate on unique sharded
-"select count(*), col from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), col from `user` where 1 != 1",
- "Query": "select count(*), col from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), col from `user` where 1 != 1",
- "Query": "select count(*), col from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Aggregate detection (non-aggregate function)
-"select fun(1), col from user"
-{
- "QueryType": "SELECT",
- "Original": "select fun(1), col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select fun(1), col from `user` where 1 != 1",
- "Query": "select fun(1), col from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select fun(1), col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select fun(1), col from `user` where 1 != 1",
- "Query": "select fun(1), col from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select distinct with unique vindex for scatter route.
-"select distinct col1, id from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1",
- "Query": "select distinct col1, id from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1",
- "Query": "select distinct col1, id from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct and group by together for single route - group by is redundant
-"select distinct col1, id from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user group by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1 group by col1",
- "Query": "select distinct col1, id from `user` group by col1",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, id from user group by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, id from `user` where 1 != 1 group by col1",
- "Query": "select distinct col1, id from `user` group by col1",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter group by a text column
-"select count(*), a, textcol1, b from user group by a, textcol1, b"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), a, textcol1, b from user group by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1, 4, 3",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b)",
- "OrderBy": "(1|5) ASC, (2|4) ASC, (3|6) ASC",
- "Query": "select count(*), a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) order by a asc, textcol1 asc, b asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), a, textcol1, b from user group by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "GroupBy": "(1|4), 2 COLLATE latin1_swedish_ci, (3|5)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), textcol1, b, weight_string(b)",
- "OrderBy": "(1|4) ASC, 2 ASC COLLATE latin1_swedish_ci, (3|5) ASC",
- "Query": "select count(*), a, textcol1, b, weight_string(a), weight_string(b) from `user` group by a, weight_string(a), textcol1, b, weight_string(b) order by a asc, textcol1 asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter group by a integer column. Do not add weight strings for this.
-"select count(*), intcol from user group by intcol"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), intcol from user group by intcol",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), intcol from `user` where 1 != 1 group by intcol",
- "OrderBy": "1 ASC",
- "Query": "select count(*), intcol from `user` group by intcol order by intcol asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*), intcol from user group by intcol",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), intcol from `user` where 1 != 1 group by intcol",
- "OrderBy": "1 ASC",
- "Query": "select count(*), intcol from `user` group by intcol order by intcol asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter group by a text column, reuse existing weight_string
-"select count(*) k, a, textcol1, b from user group by a, textcol1, b order by k, textcol1"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) k, a, textcol1, b from user group by a, textcol1, b order by k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC, (2|4) ASC",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1, 4, 3",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as k, a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b)",
- "OrderBy": "(2|4) ASC, (1|5) ASC, (3|6) ASC",
- "Query": "select count(*) as k, a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) from `user` group by a, textcol1, b, weight_string(textcol1), weight_string(a), weight_string(b) order by textcol1 asc, a asc, b asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) k, a, textcol1, b from user group by a, textcol1, b order by k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC, 2 ASC COLLATE latin1_swedish_ci",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS k",
- "GroupBy": "(1|4), 2 COLLATE latin1_swedish_ci, (3|5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as k, a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), textcol1, b, weight_string(b)",
- "OrderBy": "(1|4) ASC, 2 ASC COLLATE latin1_swedish_ci, (3|5) ASC",
- "Query": "select count(*) as k, a, textcol1, b, weight_string(a), weight_string(b) from `user` group by a, weight_string(a), textcol1, b, weight_string(b) order by a asc, textcol1 asc, b asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count aggregate
-"select count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# sum aggregate
-"select sum(col) from user"
-{
- "QueryType": "SELECT",
- "Original": "select sum(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select sum(col) from `user` where 1 != 1",
- "Query": "select sum(col) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sum(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS sum(col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select sum(col) from `user` where 1 != 1",
- "Query": "select sum(col) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# min aggregate
-"select min(col) from user"
-{
- "QueryType": "SELECT",
- "Original": "select min(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "min(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select min(col) from `user` where 1 != 1",
- "Query": "select min(col) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select min(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "min(0) AS min(col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select min(col) from `user` where 1 != 1",
- "Query": "select min(col) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# max aggregate
-"select max(col) from user"
-{
- "QueryType": "SELECT",
- "Original": "select max(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(col) from `user` where 1 != 1",
- "Query": "select max(col) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select max(col) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0) AS max(col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(col) from `user` where 1 != 1",
- "Query": "select max(col) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct and group by together for scatter route
-"select distinct col1, col2 from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1) order by col1 asc, col2 asc, col1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|3)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1 order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregate on RHS subquery (tests symbol table merge)
-"select user.a, t.b from user join (select count(*) b from unsharded) as t"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, t.b from user join (select count(*) b from unsharded) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.b from (select count(*) as b from unsharded where 1 != 1) as t where 1 != 1",
- "Query": "select t.b from (select count(*) as b from unsharded) as t",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.a, t.b from user join (select count(*) b from unsharded) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.b from (select count(*) as b from unsharded where 1 != 1) as t where 1 != 1",
- "Query": "select t.b from (select count(*) as b from unsharded) as t",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# group by a unique vindex should use a simple route
-"select id, count(*) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex and other column should use a simple route
-"select id, col, count(*) from user group by id, col"
-{
- "QueryType": "SELECT",
- "Original": "select id, col, count(*) from user group by id, col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, col, count(*) from `user` where 1 != 1 group by id, col",
- "Query": "select id, col, count(*) from `user` group by id, col",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, col, count(*) from user group by id, col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, col, count(*) from `user` where 1 != 1 group by id, col",
- "Query": "select id, col, count(*) from `user` group by id, col",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a non-vindex column should use an OrderdAggregate primitive
-"select col, count(*) from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by must only reference expressions in the select list
-"select col, count(*) from user group by col, baz"
-"unsupported: in scatter query: group by column must reference column in SELECT list"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col, baz",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0, (2|3)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*), baz, weight_string(baz) from `user` where 1 != 1 group by col, baz, weight_string(baz)",
- "OrderBy": "0 ASC, (2|3) ASC",
- "Query": "select col, count(*), baz, weight_string(baz) from `user` group by col, baz, weight_string(baz) order by col asc, baz asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a non-unique vindex column should use an OrderedAggregate primitive
-"select name, count(*) from user group by name"
-{
- "QueryType": "SELECT",
- "Original": "select name, count(*) from user group by name",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, count(*), weight_string(`name`) from `user` where 1 != 1 group by `name`, weight_string(`name`)",
- "OrderBy": "(0|2) ASC",
- "Query": "select `name`, count(*), weight_string(`name`) from `user` group by `name`, weight_string(`name`) order by `name` asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select name, count(*) from user group by name",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, count(*), weight_string(`name`) from `user` where 1 != 1 group by `name`, weight_string(`name`)",
- "OrderBy": "(0|2) ASC",
- "Query": "select `name`, count(*), weight_string(`name`) from `user` group by `name`, weight_string(`name`) order by `name` asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex should use a simple route, even if aggr is complex
-"select id, 1+count(*) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, 1+count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, 1 + count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, 1 + count(*) from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, 1+count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, 1 + count(*) from `user` where 1 != 1 group by id",
- "Query": "select id, 1 + count(*) from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex where alias from select list is used
-"select id as val, 1+count(*) from user group by val"
-{
- "QueryType": "SELECT",
- "Original": "select id as val, 1+count(*) from user group by val",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as val, 1 + count(*) from `user` where 1 != 1 group by val",
- "Query": "select id as val, 1 + count(*) from `user` group by val",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as val, 1+count(*) from user group by val",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as val, 1 + count(*) from `user` where 1 != 1 group by val",
- "Query": "select id as val, 1 + count(*) from `user` group by val",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex where expression is qualified (alias should be ignored)
-"select val as id, 1+count(*) from user group by user.id"
-{
- "QueryType": "SELECT",
- "Original": "select val as id, 1+count(*) from user group by user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val as id, 1 + count(*) from `user` where 1 != 1 group by `user`.id",
- "Query": "select val as id, 1 + count(*) from `user` group by `user`.id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select val as id, 1+count(*) from user group by user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val as id, 1 + count(*) from `user` where 1 != 1 group by `user`.id",
- "Query": "select val as id, 1 + count(*) from `user` group by `user`.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex where it should skip non-aliased expressions.
-"select *, id, 1+count(*) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select *, id, 1+count(*) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select *, id, 1 + count(*) from `user` where 1 != 1 group by id",
- "Query": "select *, id, 1 + count(*) from `user` group by id",
- "Table": "`user`"
- }
-}
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# group by a unique vindex should revert to simple route, and having clause should find the correct symbols.
-"select id, count(*) c from user group by id having id=1 and c=10"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having id=1 and c=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` group by id having id = 1 and c = 10",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having id=1 and c=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` where id = 1 group by id having count(*) = 10",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by a unique vindex should revert to simple route, and having clause should find the correct symbols.
-"select id, count(*) c from user group by id having max(col) \u003e 10"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having max(col) \u003e 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` group by id having max(col) \u003e 10",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) c from user group by id having max(col) \u003e 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id",
- "Query": "select id, count(*) as c from `user` group by id having max(col) \u003e 10",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate in a subquery
-"select a from (select count(*) as a from user) t"
-{
- "QueryType": "SELECT",
- "Original": "select a from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with non-aggregate expressions.
-"select id, count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(1) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate using distinctdistinct
-"select distinct col from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select distinct col from `user` order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select distinct col from `user` order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by select col
-"select col from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct group by unique vindex
-"select id, count(distinct col) from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(distinct col) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
- "Query": "select id, count(distinct col) from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(distinct col) from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id",
- "Query": "select id, count(distinct col) from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct unique vindex
-"select col, count(distinct id) from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct id) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(distinct id) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct id) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_distinct(1) AS count(distinct id)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(distinct id) from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct no unique vindex
-"select col1, count(distinct col2) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS count(distinct col2)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|3) AS count(distinct col2)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct no unique vindex and no group by
-"select count(distinct col2) from user"
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct col2) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count_distinct_count(0) AS count(distinct col2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
- "OrderBy": "(0|1) ASC",
- "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct col2) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count_distinct(0|1) AS count(distinct col2)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)",
- "OrderBy": "(0|1) ASC",
- "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count with distinct no unique vindex, count expression aliased
-"select col1, count(distinct col2) c2 from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) c2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS c2",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) c2 from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|3) AS c2",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# sum with distinct no unique vindex
-"select col1, sum(distinct col2) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, sum(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_distinct_sum(1) AS sum(distinct col2)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, sum(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_distinct(1|3) AS sum(distinct col2)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# min with distinct no unique vindex. distinct is ignored.
-"select col1, min(distinct col2) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC",
- "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1|3) AS min(distinct col2)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by count distinct
-"select col1, count(distinct col2) k from user group by col1 order by k"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS k",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2) k from user group by col1 order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|3) AS k",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by aggregate function
-"select count(*) b from user group by b"
-"Can't group on 'b'"
-Gen4 error: Can't group on 'count(*)'
-
-# scatter aggregate multiple group by (columns)
-"select a, b, count(*) from user group by b, a"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
- "OrderBy": "(1|3) ASC, (0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3), (1|4)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate multiple group by (numbers)
-"select a, b, count(*) from user group by 2, 1"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by 2, 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by 2, 1, weight_string(b), weight_string(a)",
- "OrderBy": "(1|3) ASC, (0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1, weight_string(b), weight_string(a) order by b asc, a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by 2, 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3), (1|4)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate multiple group by columns inverse order
-"select a, b, count(*) from user group by b, a"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)",
- "OrderBy": "(1|3) ASC, (0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by b, a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3), (1|4)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by column number
-"select col from user group by 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by 1 order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user group by 1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate group by invalid column number
-"select col from user group by 2"
-"Unknown column '2' in 'group statement'"
-Gen4 plan same as above
-
-# scatter aggregate order by null
-"select count(*) from user order by null"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with numbered order by columns
-"select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "0, 1, 2",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by 1 asc, 2 asc, 3 asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
- "GroupBy": "(0|5), (1|6), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with named order by columns
-"select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "0, 1, 2",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)",
- "GroupBy": "(0|5), (1|6), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
- "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with jumbled order by columns
-"select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "0, 1, 2, 3",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
- "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(4) AS count(*)",
- "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
- "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with jumbled group by and order by columns
-"select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(4) AS count",
- "GroupBy": "2, 1, 0, 3",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)",
- "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(4) AS count(*)",
- "GroupBy": "(3|8), (1|6), (0|5), (2|7)",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)",
- "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC",
- "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with some descending order by cols
-"select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(3) AS count",
- "GroupBy": "2, 1, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b)",
- "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC",
- "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b) order by 1 desc, 3 desc, b asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(3) AS count(*)",
- "GroupBy": "(0|4), (2|6), (1|5)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)",
- "OrderBy": "(0|4) DESC, (2|6) DESC, (1|5) ASC",
- "Query": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a desc, c desc, b asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# invalid order by column numner for scatter
-"select col, count(*) from user group by col order by 5 limit 10"
-"Unknown column '5' in 'order clause'"
-Gen4 plan same as above
-
-# aggregate with limit
-"select col, count(*) from user group by col limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Group by with collate operator
-"select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
- "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci",
- "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules for aggregates
-"select id, count(*) from route2 group by id"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from route2 group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
- "Query": "select id, count(*) from unsharded as route2 group by id",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from route2 group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id",
- "Query": "select id, count(*) from unsharded as route2 group by id",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# order by on a reference table
-"select col from ref order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from ref order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref order by col asc",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from ref order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref order by col asc",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# distinct and aggregate functions missing group by
-"select distinct a, count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select a, count(*), weight_string(a) from `user` order by a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS a, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*) from `user` where 1 != 1",
- "Query": "select a, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct and aggregate functions
-"select distinct a, count(*) from user group by a"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0, 0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc, a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), 1",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|2) ASC",
- "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Group by invalid column number (code is duplicated from symab).
-"select id from user group by 1.1"
-"column number is not an int"
-{
- "QueryType": "SELECT",
- "Original": "select id from user group by 1.1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS id",
- "GroupBy": "1",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, 1.1 from `user` where 1 != 1 group by 1.1",
- "OrderBy": "1 ASC",
- "Query": "select id, 1.1 from `user` group by 1.1 order by 1.1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Group by out of range column number (code is duplicated from symab).
-"select id from user group by 2"
-"Unknown column '2' in 'group statement'"
-Gen4 plan same as above
-
-# here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)
-"select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
- "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
- "Table": "`user`, user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# order by inside derived tables can be ignored
-"select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a"
-{
- "QueryType": "SELECT",
- "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id order by user_extra.extra asc",
- "ResultColumns": 2,
- "Table": "`user`, user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1",
- "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-
-# here we keep the order since the column is visible on the outside, and used by the orderedAggregate
-"select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a group by col order by col asc",
- "Table": "`user`, user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# optimize group by when using distinct with no aggregation
-"select distinct col1, col2 from user group by col1, col2"
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1, col2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1, 0, 1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)",
- "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC, (1|3) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc, col1 asc, col2 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct col1, col2 from user group by col1, col2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|3)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2",
- "OrderBy": "(0|2) ASC, (1|3) ASC",
- "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# do not use distinct when using only aggregates and no group by
-"select distinct count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct count(*) from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Grouping on join
-"select user.a from user join user_extra group by user.a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.a from user join user_extra group by user.a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as a",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
- "OrderBy": "(0|1) ASC",
- "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Cannot have more than one aggr(distinct...
-"select count(distinct a), count(distinct b) from user"
-"unsupported: only one distinct aggregation allowed in a select: count(distinct b)"
-Gen4 plan same as above
-
-# multiple distinct functions with grouping.
-"select col1, count(distinct col2), sum(distinct col2) from user group by col1"
-"unsupported: only one distinct aggregation allowed in a select: sum(distinct col2)"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(distinct col2), sum(distinct col2) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|4) AS count(distinct col2), sum_distinct(2|4) AS sum(distinct col2)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregate query with order by aggregate column along with NULL
-"select col, count(*) k from user group by col order by null, k"
-"unsupported: in scatter query: complex order by expression: null"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) k from user group by col order by null, k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS k",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as k from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregate query with order by NULL
-"select col, count(*) k from user group by col order by null"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) k from user group by col order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as k from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) k from user group by col order by null",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS k",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as k from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join query on sharding key with group by a unique vindex with having clause.
-"select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) \u003e 10"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) \u003e 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, count(*) as c from `user`, user_extra where 1 != 1 group by `user`.id",
- "Query": "select `user`.id, count(*) as c from `user`, user_extra where `user`.id = user_extra.user_id group by `user`.id having max(`user`.col) \u003e 10",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery on sharding key with group by a unique vindex with having clause.
-"select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) \u003e 10)"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) \u003e 10)",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) \u003e 10 limit 1)",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) \u003e 10)",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) \u003e 10 limit 1)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# aggregation filtering by having on a route
-"select id from user group by id having count(id) = 10"
-{
- "QueryType": "SELECT",
- "Original": "select id from user group by id having count(id) = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 group by id",
- "Query": "select id from `user` group by id having count(id) = 10",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user group by id having count(id) = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 group by id",
- "Query": "select id from `user` group by id having count(id) = 10",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# weight_string addition to group by
-"select lower(textcol1) as v, count(*) from user group by v"
-{
- "QueryType": "SELECT",
- "Original": "select lower(textcol1) as v, count(*) from user group by v",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select lower(textcol1) as v, count(*) from user group by v",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# weight_string addition to group by when also there in order by
-"select char_length(texcol1) as a, count(*) from user group by a order by a"
-{
- "QueryType": "SELECT",
- "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by inside and outside parenthesis select
-"(select id from user order by 1 desc) order by 1 asc limit 2"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by 1 asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) order by 1 asc limit 2",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# correlated subquery in exists clause with an ordering
-"select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by id"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by id",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_id": 0
- },
- "ProjectedIndexes": "-2,-1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, col, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select `user`.id, col, weight_string(id) from `user` order by id asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id \u003c :user_id",
- "Table": "user_extra",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Column and Literal equality filter on scatter aggregates
-"select count(*) a from user having a = 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a = 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 = 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Equality filtering with column and string literal on scatter aggregates
-"select count(*) a from user having a = '1'"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a = '1'",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 = '1'",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column and Literal not equal filter on scatter aggregates
-"select count(*) a from user having a != 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a != 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 != 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Not equal filter with column and string literal on scatter aggregates
-"select count(*) a from user having a != '1'"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a != '1'",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 != '1'",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Greater than filter on scatter aggregates
-"select count(*) a from user having a \u003e 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003e 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003e 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Greater Equal filter on scatter aggregates
-"select count(*) a from user having a \u003e= 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003e= 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003e= 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Less than filter on scatter aggregates
-"select count(*) a from user having a \u003c 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003c 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003c 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Less Equal filter on scatter aggregates
-"select count(*) a from user having a \u003c= 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a \u003c= 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 \u003c= 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Less Equal filter on scatter with grouping
-"select col, count(*) a from user group by col having a \u003c= 10"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) a from user group by col having a \u003c= 10",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":1 \u003c= 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS a",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*) as a from `user` where 1 != 1 group by col",
- "OrderBy": "0 ASC",
- "Query": "select col, count(*) as a from `user` group by col order by col asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# We should be able to find grouping keys on ordered aggregates
-"select count(*) as a, val1 from user group by val1 having a = 1.00"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":0 = 1.00",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(0) AS a",
- "GroupBy": "(1|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct on text column with collation
-"select col, count(distinct textcol1) from user group by col"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct textcol1) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS count(distinct textcol1)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, textcol1, weight_string(textcol1) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol1)",
- "OrderBy": "0 ASC, (1|2) ASC",
- "Query": "select col, textcol1, weight_string(textcol1) from `user` group by col, textcol1, weight_string(textcol1) order by col asc, textcol1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col, count(distinct textcol1) from user group by col",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1 COLLATE latin1_swedish_ci) AS count(distinct textcol1)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, textcol1 from `user` where 1 != 1 group by col, textcol1",
- "OrderBy": "0 ASC, 1 ASC COLLATE latin1_swedish_ci",
- "Query": "select col, textcol1 from `user` group by col, textcol1 order by col asc, textcol1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation filtering by having on a route with no group by with non-unique vindex filter
-"select 1 from user having count(id) = 10 and name = 'a'"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10 and name = 'a'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` having count(id) = 10 and `name` = 'a'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"a\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10 and name = 'a'",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"a\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
- "Query": "select 1, count(id) from `user` where `name` = 'a'",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Aggregates and joins
-"select count(*) from user join user_extra"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user join user_extra",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] * [COLUMN 1] as count(*)"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# aggregation filtering by having on a route with no group by
-"select 1 from user having count(id) = 10"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` having count(id) = 10",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user having count(id) = 10",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(id) from `user` where 1 != 1",
- "Query": "select 1, count(id) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Aggregate on join
-"select user.a, count(*) from user join user_extra group by user.a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, count(*) from user join user_extra group by user.a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as a",
- "[COLUMN 2] * [COLUMN 3] as count(*)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Aggregate on other table in join
-"select user.a, count(user_extra.a) from user join user_extra group by user.a"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, count(user_extra.a) from user join user_extra group by user.a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count(user_extra.a)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as a",
- "[COLUMN 2] * [COLUMN 3] as count(user_extra.a)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(*), `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(user_extra.a) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(user_extra.a) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# aggregation spread out across three routes
-"select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count(u.textcol1), sum_count(1) AS count(ue.foo)",
- "GroupBy": "(2|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "([COLUMN 2] * [COLUMN 3]) * [COLUMN 4] as count(u.textcol1)",
- "([COLUMN 5] * [COLUMN 6]) * [COLUMN 7] as count(ue.foo)",
- "[COLUMN 0] as bar",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:1,R:2,R:3,L:2,R:4,R:5",
- "JoinVars": {
- "u_foo": 0
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u where 1 != 1 group by u.foo, weight_string(u.foo)",
- "Query": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u group by u.foo, weight_string(u.foo)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
- "JoinVars": {
- "ue_bar": 0
- },
- "TableName": "user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where 1 != 1 group by ue.bar, weight_string(ue.bar)",
- "Query": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where ue.bar = :u_foo group by ue.bar, weight_string(ue.bar)",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where 1 != 1 group by us.bar, weight_string(us.bar)",
- "Query": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where us.baz = :ue_bar group by us.bar, weight_string(us.bar)",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# using two distinct columns - min with distinct vindex, sum with distinct without vindex
-"select col1, min(distinct id), sum(distinct col3) from user group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1), sum_distinct_sum(2) AS sum(distinct col3)",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(col3)",
- "OrderBy": "(0|3) ASC, (2|4) ASC",
- "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(col3) order by col1 asc, col3 asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1) AS min(distinct id), sum_distinct(2|4) AS sum(distinct col3)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, weight_string(col1), col3, weight_string(col3)",
- "OrderBy": "(0|3) ASC, (2|4) ASC",
- "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, weight_string(col1), col3, weight_string(col3) order by col1 asc, col3 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation on top of semijoin
-"select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 1] as count(*)"
- ],
- "Inputs": [
- {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_apa": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` where 1 != 1 group by `user`.apa, weight_string(`user`.apa)",
- "Query": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` group by `user`.apa, weight_string(`user`.apa)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.bar = :user_apa",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# we have to track the order of distinct aggregation expressions
-"select val2, count(distinct val1), count(*) from user group by val2"
-{
- "QueryType": "SELECT",
- "Original": "select val2, count(distinct val1), count(*) from user group by val2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct_count(1) AS count(distinct val1), sum_count(2) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select val2, count(distinct val1), count(*) from user group by val2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|4) AS count(distinct val1), sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, weight_string(val2), val1, weight_string(val1)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, weight_string(val2), val1, weight_string(val1) order by val2 asc, val1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by column alias
-"select ascii(val1) as a, count(*) from user group by a"
-{
- "QueryType": "SELECT",
- "Original": "select ascii(val1) as a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ascii(val1) as a, count(*) from user group by a",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))",
- "OrderBy": "(0|2) ASC",
- "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple distinct aggregations on the same column is allowed
-"select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1"
-"unsupported: only one distinct aggregation allowed in a select: sum(distinct tcol2)"
-{
- "QueryType": "SELECT",
- "Original": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|4) AS count(distinct tcol2), sum_distinct(2|4) AS sum(distinct tcol2)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2)",
- "OrderBy": "(0|3) ASC, (1|4) ASC",
- "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2) order by tcol1 asc, tcol2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple distinct aggregations on the same column in different positions
-"select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1"
-"unsupported: only one distinct aggregation allowed in a select: sum(distinct tcol2)"
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(0|4) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|4) AS sum(distinct tcol2)",
- "GroupBy": "(1|5)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` where 1 != 1 group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1)",
- "OrderBy": "(1|5) ASC, (0|4) ASC",
- "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1) order by tcol1 asc, tcol2 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# distinct aggregation will 3 table join query
-"select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_distinct(1|2) AS count(distinct u.val2)",
- "GroupBy": "0 COLLATE latin1_swedish_ci",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as textcol1",
- "[COLUMN 1] as val2",
- "[COLUMN 2]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:2,L:3,L:5",
- "JoinVars": {
- "u2_val2": 0
- },
- "TableName": "`user`_`user`_music",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,L:2,L:0,R:1,L:1",
- "JoinVars": {
- "u_val2": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u where 1 != 1 group by u.val2, weight_string(u.val2), u.textcol1",
- "OrderBy": "2 ASC COLLATE latin1_swedish_ci, (0|1) ASC",
- "Query": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u group by u.val2, weight_string(u.val2), u.textcol1 order by u.textcol1 asc, u.val2 asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u2.val2, weight_string(u2.val2) from `user` as u2 where 1 != 1 group by u2.val2, weight_string(u2.val2)",
- "Query": "select u2.val2, weight_string(u2.val2) from `user` as u2 where u2.id = :u_val2 group by u2.val2, weight_string(u2.val2)",
- "Table": "`user`",
- "Values": [
- ":u_val2"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.id = :u2_val2",
- "Table": "music",
- "Values": [
- ":u2_val2"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# interleaving grouping, aggregation and join
-"select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "min(1) AS min(user_extra.foo), max(3) AS max(user_extra.bar)",
- "GroupBy": "0, (2|4)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as col",
- "[COLUMN 3] as min(user_extra.foo)",
- "[COLUMN 1] as bar",
- "[COLUMN 4] as max(user_extra.bar)",
- "[COLUMN 2]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,R:1,R:2",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1 group by `user`.col, `user`.bar, weight_string(`user`.bar)",
- "OrderBy": "0 ASC, (1|2) ASC",
- "Query": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` group by `user`.col, `user`.bar, weight_string(`user`.bar) order by `user`.col asc, `user`.bar asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where user_extra.bar = :user_col group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# group_concat on single shards
-"select group_concat(user_id order by name), id from user group by id"
-{
- "QueryType": "SELECT",
- "Original": "select group_concat(user_id order by name), id from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
- "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select group_concat(user_id order by name), id from user group by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id",
- "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select count(distinct user_id, name) from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct user_id, name) from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
- "Query": "select count(distinct user_id, `name`) from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(distinct user_id, name) from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1",
- "Query": "select count(distinct user_id, `name`) from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-"select count(distinct user_id, name) from user"
-"unsupported: only one expression allowed inside aggregates: count(distinct user_id, `name`)"
-Gen4 error: aggregate functions take a single argument 'count(distinct user_id, `name`)'
-
-"select sum(col) from (select user.col as col, 32 from user join user_extra) t"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select sum(col) from (select user.col as col, 32 from user join user_extra) t",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS sum(col)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] * [COLUMN 3] as sum(col)"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col as col, 32, sum(col) from `user` where 1 != 1",
- "Query": "select `user`.col as col, 32, sum(col) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# find aggregation expression and use column offset in filter
-"select foo, count(*) from user group by foo having count(*) = 3"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo, count(*) from user group by foo having count(*) = 3",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 3",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|2) ASC",
- "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# find aggregation expression and use column offset in filter times two
-"select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1,
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 + :2 = 42",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|3) ASC",
- "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# find aggregation expression and use column offset in filter times three
-"select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1,
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 + :2 = 42",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|3) ASC",
- "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# having should be able to add new aggregation expressions in having
-"select foo from user group by foo having count(*) = 3"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select foo from user group by foo having count(*) = 3",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 3",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)",
- "OrderBy": "(0|2) ASC",
- "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 3",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count(u.`name`)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as id",
- "[COLUMN 2] * [COLUMN 3] as count(u.`name`)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
- "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
- "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id where 1 != 1 group by u.id",
- "Query": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id group by u.id having count(u.`name`) = 3",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u, user_extra as ue where 1 != 1 group by u.id",
- "Query": "select u.id from `user` as u, user_extra as ue where ue.user_id = u.id group by u.id having count(u.`name`) = 3",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# only extract the aggregation once, even if used twice
-"select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) \u003c 3 and count(*) \u003e 5"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) \u003c 3 and count(*) \u003e 5",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 \u003c 3 and :1 \u003e 5",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as id",
- "[COLUMN 2] * [COLUMN 3] as count(*)",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)",
- "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)",
- "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select (select 1 from user u having count(ue.col) \u003e 10) from user_extra ue"
-"symbol ue.col not found in subquery"
-{
- "QueryType": "SELECT",
- "Original": "select (select 1 from user u having count(ue.col) \u003e 10) from user_extra ue",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 \u003e 10",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS 1, sum_count(1) AS count(ue.col)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(ue.col) from `user` as u where 1 != 1",
- "Query": "select 1, count(ue.col) from `user` as u",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from user_extra as ue where 1 != 1",
- "Query": "select :__sq1 from user_extra as ue",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# group by and ',' joins with condition
-"select user.col from user join user_extra on user_extra.col = user.col group by user.id"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user_extra.col = user.col group by user.id",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS col",
- "GroupBy": "(2|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] * [COLUMN 3] as col",
- "[COLUMN 1]",
- "[COLUMN 0] as id"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:0,R:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.col, `user`.id, weight_string(`user`.id)",
- "OrderBy": "(1|2) ASC",
- "Query": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` group by `user`.col, `user`.id, weight_string(`user`.id) order by `user`.id asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra where user_extra.col = :user_col group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# scatter aggregate symtab lookup error
-"select id, b as id, count(*) from user order by id"
-"ambiguous symbol reference: id"
-{
- "QueryType": "SELECT",
- "Original": "select id, b as id, count(*) from user order by id",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, random(1) AS id, sum_count_star(2) AS count(*)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, b as id, count(*), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select id, b as id, count(*), weight_string(b) from `user` order by id asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggr and non-aggr without group by (with query does not give useful result out)
-"select id, count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(1) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# group by and ',' joins
-"select user.id from user, user_extra group by id"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user, user_extra group by id",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS id",
- "GroupBy": "(2|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] * [COLUMN 3] as id",
- "[COLUMN 1]",
- "[COLUMN 0] as id"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:0,R:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, weight_string(id) from `user` where 1 != 1 group by id, weight_string(id)",
- "OrderBy": "(0|1) ASC",
- "Query": "select `user`.id, weight_string(id) from `user` group by id, weight_string(id) order by id asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1",
- "Query": "select 1, count(*) from user_extra group by 1",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# count on column from LIMIT
-"select count(city) from (select phone, id, city from user where id \u003e 12 limit 10) as x"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(city) from (select phone, id, city from user where id \u003e 12 limit 10) as x",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count(0) AS count(city)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] as count(city)"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select phone, id, city from `user` where 1 != 1",
- "Query": "select phone, id, city from `user` where id \u003e 12 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count(*) on column from LIMIT
-"select count(*) from (select phone, id, city from user where id \u003e 12 limit 10) as x"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) from (select phone, id, city from user where id \u003e 12 limit 10) as x",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as count(*)"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select phone, id, city from `user` where 1 != 1",
- "Query": "select phone, id, city from `user` where id \u003e 12 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# count non-null columns incoming from outer joins should work well
-"select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x"
-{
- "QueryType": "SELECT",
- "Original": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "count(0) AS count(col)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as count(col)"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col as col from user_extra where 1 != 1",
- "Query": "select user_extra.col as col from user_extra where user_extra.id = :user_id",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# grouping on data from derived table
-"select val1, count(*) from (select id, val1 from user where val2 \u003c 4 order by val1 limit 2) as x group by val1"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select val1, count(*) from (select id, val1 from user where val2 \u003c 4 order by val1 limit 2) as x group by val1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 1] as val1",
- "[COLUMN 0] as count(*)",
- "[COLUMN 2]"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, val1, weight_string(val1) from `user` where 1 != 1",
- "OrderBy": "(1|2) ASC, (1|2) ASC",
- "Query": "select id, val1, weight_string(val1) from `user` where val2 \u003c 4 order by val1 asc, val1 asc limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Can't inline derived table when it has HAVING with aggregation function
-"select * from (select id from user having count(*) = 1) s"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select id from user having count(*) = 1) s",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select id from `user` where 1 != 1) as s where 1 != 1",
- "Query": "select * from (select id from `user` having count(*) = 1) as s",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select id from user having count(*) = 1) s",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": ":1 = 1",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) from `user` where 1 != 1",
- "Query": "select id, count(*) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.json b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.json
new file mode 100644
index 00000000000..07789fd9dea
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.json
@@ -0,0 +1,173 @@
+[
+ {
+ "comment": "Create vindex",
+ "query": "alter vschema create vindex hash_vdx using hash",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema create vindex hash_vdx using hash",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema create vindex hash_vdx using `hash`"
+ },
+ "TablesUsed": [
+ "main.hash_vdx"
+ ]
+ }
+ },
+ {
+ "comment": "Create vindex with qualifier",
+ "query": "alter vschema create vindex user.hash_vdx using hash",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema create vindex user.hash_vdx using hash",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "query": "alter vschema create vindex `user`.hash_vdx using `hash`"
+ },
+ "TablesUsed": [
+ "user.hash_vdx"
+ ]
+ }
+ },
+ {
+ "comment": "Drop vindex",
+ "query": "alter vschema drop vindex hash_vdx",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema drop vindex hash_vdx",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema drop vindex hash_vdx"
+ },
+ "TablesUsed": [
+ "main.hash_vdx"
+ ]
+ }
+ },
+ {
+ "comment": "Add table",
+ "query": "alter vschema add table a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema add table a",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema add table a"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Add sequence",
+ "query": "alter vschema add sequence a_seq",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema add sequence a_seq",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema add sequence a_seq"
+ },
+ "TablesUsed": [
+ "main.a_seq"
+ ]
+ }
+ },
+ {
+ "comment": "Add auto_increment with qualifier",
+ "query": "alter vschema on user.a add auto_increment id using a_seq",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema on user.a add auto_increment id using a_seq",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "query": "alter vschema on `user`.a add auto_increment id using a_seq"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "Drop table",
+ "query": "alter vschema drop table a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema drop table a",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema drop table a"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Add Vindex",
+ "query": "alter vschema on a add vindex hash (id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema on a add vindex hash (id)",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema on a add vindex `hash` (id)"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Drop Vindex",
+ "query": "alter vschema on a drop vindex hash",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter vschema on a drop vindex hash",
+ "Instructions": {
+ "OperatorType": "AlterVSchema",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "alter vschema on a drop vindex `hash`"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt b/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt
deleted file mode 100644
index c46df7b18e6..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/alterVschema_cases.txt
+++ /dev/null
@@ -1,170 +0,0 @@
-# Create vindex
-"alter vschema create vindex hash_vdx using hash"
-{
- "QueryType": "DDL",
- "Original": "alter vschema create vindex hash_vdx using hash",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema create vindex hash_vdx using `hash`"
- },
- "TablesUsed": [
- "main.hash_vdx"
- ]
-}
-Gen4 plan same as above
-
-# Create vindex with qualifier
-"alter vschema create vindex user.hash_vdx using hash"
-{
- "QueryType": "DDL",
- "Original": "alter vschema create vindex user.hash_vdx using hash",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "query": "alter vschema create vindex `user`.hash_vdx using `hash`"
- },
- "TablesUsed": [
- "user.hash_vdx"
- ]
-}
-Gen4 plan same as above
-
-# Drop vindex
-"alter vschema drop vindex hash_vdx"
-{
- "QueryType": "DDL",
- "Original": "alter vschema drop vindex hash_vdx",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema drop vindex hash_vdx"
- },
- "TablesUsed": [
- "main.hash_vdx"
- ]
-}
-Gen4 plan same as above
-
-# Add table
-"alter vschema add table a"
-{
- "QueryType": "DDL",
- "Original": "alter vschema add table a",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema add table a"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Add sequence
-"alter vschema add sequence a_seq"
-{
- "QueryType": "DDL",
- "Original": "alter vschema add sequence a_seq",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema add sequence a_seq"
- },
- "TablesUsed": [
- "main.a_seq"
- ]
-}
-Gen4 plan same as above
-
-# Add auto_increment with qualifier
-"alter vschema on user.a add auto_increment id using a_seq"
-{
- "QueryType": "DDL",
- "Original": "alter vschema on user.a add auto_increment id using a_seq",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "query": "alter vschema on `user`.a add auto_increment id using a_seq"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-# Drop table
-"alter vschema drop table a"
-{
- "QueryType": "DDL",
- "Original": "alter vschema drop table a",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema drop table a"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Add Vindex
-"alter vschema on a add vindex hash (id)"
-{
- "QueryType": "DDL",
- "Original": "alter vschema on a add vindex hash (id)",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema on a add vindex `hash` (id)"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Drop Vindex
-"alter vschema on a drop vindex hash"
-{
- "QueryType": "DDL",
- "Original": "alter vschema on a drop vindex hash",
- "Instructions": {
- "OperatorType": "AlterVSchema",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "alter vschema on a drop vindex `hash`"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.json b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.json
new file mode 100644
index 00000000000..10e1884cccc
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.json
@@ -0,0 +1,166 @@
+[
+ {
+ "comment": "select bypass",
+ "query": "select count(*), col from unsharded",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "select count(*), col from unsharded"
+ }
+ }
+ },
+ {
+ "comment": "update bypass",
+ "query": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
+ }
+ }
+ },
+ {
+ "comment": "update bypass autocommit",
+ "query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "MultishardAutocommit": true,
+ "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ `user` set val = 1 where id = 18446744073709551616 and id = 1"
+ }
+ }
+ },
+ {
+ "comment": "delete bypass",
+ "query": "DELETE FROM USER WHERE ID = 42",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM USER WHERE ID = 42",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "delete from `USER` where ID = 42"
+ }
+ }
+ },
+ {
+ "comment": "insert bypass: not supported",
+ "query": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
+ "plan": "INSERT not supported when targeting a key range: targetString"
+ },
+ {
+ "comment": "bypass query for into outfile s3",
+ "query": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "comment": "Select outfile",
+ "query": "select * from user into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "select * from `user` into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "query": "load data from s3 'x.txt' into table x",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt' into table x",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt' into table x",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "query": "load data from s3 'x.txt'",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt'",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "create table",
+ "query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt b/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt
deleted file mode 100644
index a9bb3e93249..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/bypass_keyrange_cases.txt
+++ /dev/null
@@ -1,163 +0,0 @@
-# select bypass
-"select count(*), col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "select count(*), col from unsharded"
- }
-}
-Gen4 plan same as above
-
-# update bypass
-"update user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
- }
-}
-Gen4 plan same as above
-
-# update bypass autocommit
-"update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "MultishardAutocommit": true,
- "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ `user` set val = 1 where id = 18446744073709551616 and id = 1"
- }
-}
-Gen4 plan same as above
-
-# delete bypass
-"DELETE FROM USER WHERE ID = 42"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM USER WHERE ID = 42",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "delete from `USER` where ID = 42"
- }
-}
-Gen4 plan same as above
-
-# insert bypass: not supported
-"INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')"
-"INSERT not supported when targeting a key range: targetString"
-Gen4 plan same as above
-
-# bypass query for into outfile s3
-"select count(*), col from unsharded into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-# Select outfile
-"select * from user into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select * from user into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "select * from `user` into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt' into table x"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt' into table x",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt' into table x",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt'"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt'",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# create table
-"create /* test */ table t1(id bigint, primary key(id)) /* comments */"
-{
- "QueryType": "DDL",
- "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json
new file mode 100644
index 00000000000..6f2be325b6b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json
@@ -0,0 +1,178 @@
+[
+ {
+ "comment": "select bypass",
+ "query": "select count(*), col from unsharded",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "select count(*), col from unsharded"
+ }
+ }
+ },
+ {
+ "comment": "update bypass",
+ "query": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
+ }
+ }
+ },
+ {
+ "comment": "delete bypass",
+ "query": "DELETE FROM USER WHERE ID = 42",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM USER WHERE ID = 42",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "delete from `USER` where ID = 42"
+ }
+ }
+ },
+ {
+ "comment": "insert bypass",
+ "query": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "insert into `USER`(ID, `NAME`) values (42, 'ms X')"
+ }
+ }
+ },
+ {
+ "comment": "insert bypass with sequence: sequences ignored",
+ "query": "insert into user(nonid) values (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid) values (2)",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "insert into `user`(nonid) values (2)"
+ }
+ }
+ },
+ {
+ "comment": "bypass query for into outfile s3",
+ "query": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "comment": "Select outfile",
+ "query": "select * from user into outfile S3 'x.txt'",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user into outfile S3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "select * from `user` into outfile s3 'x.txt'"
+ }
+ }
+ },
+ {
+ "query": "load data from s3 'x.txt' into table x",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt' into table x",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt' into table x",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "query": "load data from s3 'x.txt'",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "load data from s3 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "IsDML": true,
+ "Query": "load data from s3 'x.txt'",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "create table",
+ "query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "Shard(-80)",
+ "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt
deleted file mode 100644
index 17f697535e4..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.txt
+++ /dev/null
@@ -1,175 +0,0 @@
-# select bypass
-"select count(*), col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "select count(*), col from unsharded"
- }
-}
-Gen4 plan same as above
-
-# update bypass
-"update user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1"
- }
-}
-Gen4 plan same as above
-
-# delete bypass
-"DELETE FROM USER WHERE ID = 42"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM USER WHERE ID = 42",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "delete from `USER` where ID = 42"
- }
-}
-Gen4 plan same as above
-
-# insert bypass
-"INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO USER (ID, NAME) VALUES (42, 'ms X')",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "insert into `USER`(ID, `NAME`) values (42, 'ms X')"
- }
-}
-Gen4 plan same as above
-
-# insert bypass with sequence: sequences ignored
-"insert into user(nonid) values (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid) values (2)",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "insert into `user`(nonid) values (2)"
- }
-}
-Gen4 plan same as above
-
-# bypass query for into outfile s3
-"select count(*), col from unsharded into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select count(*), col from unsharded into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "select count(*), col from unsharded into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-# Select outfile
-"select * from user into outfile S3 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select * from user into outfile S3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "select * from `user` into outfile s3 'x.txt'"
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt' into table x"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt' into table x",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt' into table x",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-"load data from s3 'x.txt'"
-{
- "QueryType": "OTHER",
- "Original": "load data from s3 'x.txt'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "IsDML": true,
- "Query": "load data from s3 'x.txt'",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# create table
-"create /* test */ table t1(id bigint, primary key(id)) /* comments */"
-{
- "QueryType": "DDL",
- "Original": "create /* test */ table t1(id bigint, primary key(id)) /* comments */",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "Shard(-80)",
- "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/call_cases.json b/go/vt/vtgate/planbuilder/testdata/call_cases.json
new file mode 100644
index 00000000000..b730ac8384b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/call_cases.json
@@ -0,0 +1,58 @@
+[
+ {
+ "comment": "simple call proc on current keyspace",
+ "query": "call proc()",
+ "plan": {
+ "QueryType": "CALL_PROC",
+ "Original": "call proc()",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "call proc()"
+ }
+ }
+ },
+ {
+ "comment": "call qualified keyspace",
+ "query": "call main.proc()",
+ "plan": {
+ "QueryType": "CALL_PROC",
+ "Original": "call main.proc()",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "call proc()"
+ }
+ }
+ },
+ {
+ "comment": "CALL not allowed on sharded keyspaces",
+ "query": "call user.proc()",
+ "plan": "CALL is not supported for sharded database"
+ },
+ {
+ "comment": "CALL with expressions and parameters",
+ "query": "call proc(1, 'foo', @var)",
+ "plan": {
+ "QueryType": "CALL_PROC",
+ "Original": "call proc(1, 'foo', @var)",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "call proc(1, 'foo', :__vtudvvar)"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/call_cases.txt b/go/vt/vtgate/planbuilder/testdata/call_cases.txt
deleted file mode 100644
index eb9e0277c84..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/call_cases.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-# simple call proc on current keyspace
-"call proc()"
-{
- "QueryType": "CALL_PROC",
- "Original": "call proc()",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "call proc()"
- }
-}
-Gen4 plan same as above
-
-# call qualified keyspace
-"call main.proc()"
-{
- "QueryType": "CALL_PROC",
- "Original": "call main.proc()",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "call proc()"
- }
-}
-Gen4 plan same as above
-
-# CALL not allowed on sharded keyspaces
-"call user.proc()"
-"CALL is not supported for sharded database"
-Gen4 plan same as above
-
-# CALL with expressions and parameters
-"call proc(1, 'foo', @var)"
-{
- "QueryType": "CALL_PROC",
- "Original": "call proc(1, 'foo', @var)",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "call proc(1, 'foo', :__vtudvvar)"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json
new file mode 100644
index 00000000000..25d8fd23e81
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json
@@ -0,0 +1,516 @@
+[
+ {
+ "comment": "simple create table",
+ "query": "create table t1(id bigint, primary key(id))",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table t1(id bigint, primary key(id))",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
+ },
+ "TablesUsed": [
+ "main.t1"
+ ]
+ }
+ },
+ {
+ "comment": "simple create table with keyspace",
+ "query": "create table user.t1(id bigint, primary key(id))",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table user.t1(id bigint, primary key(id))",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
+ },
+ "TablesUsed": [
+ "user.t1"
+ ]
+ }
+ },
+ {
+ "comment": "DDL",
+ "query": "create table a(id int)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table a(id int)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create table a (\n\tid int\n)"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+    "comment": "simple create table with table qualifier that does not exist",
+ "query": "create table a.b(id int)",
+ "plan": "Unknown database 'a' in vschema"
+ },
+ {
+ "comment": "Alter table",
+ "query": "alter table a ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table a ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "alter table a add column id int"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with qualifier",
+ "query": "alter table user.user ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table user.user ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add column id int"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with qualifier and table not in vschema",
+ "query": "alter table user.a ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table user.a ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table a add column id int"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with unknown qualifier",
+ "query": "alter table a.b ADD id int",
+ "plan": "Unknown database 'a' in vschema"
+ },
+ {
+ "comment": "create db foo",
+ "query": "create database foo",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create database foo",
+ "Instructions": {
+ "OperatorType": "CREATEDB",
+ "Keyspace": {
+ "Name": "foo",
+ "Sharded": false
+ }
+ }
+ }
+ },
+ {
+ "comment": "create db main",
+ "query": "create database main",
+ "plan": "Can't create database 'main'; database exists"
+ },
+ {
+ "comment": "create db if not exists main",
+ "query": "create database if not exists main",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create database if not exists main",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "alter db foo",
+ "query": "alter database foo collate utf8",
+ "plan": "Can't alter database 'foo'; unknown database"
+ },
+ {
+ "comment": "alter db main",
+ "query": "alter database main collate utf8",
+ "plan": "alter database is not supported"
+ },
+ {
+ "comment": "drop db foo",
+ "query": "drop database foo",
+ "plan": "Can't drop database 'foo'; database doesn't exists"
+ },
+ {
+ "comment": "drop db main",
+ "query": "drop database main",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop database main",
+ "Instructions": {
+ "OperatorType": "DROPDB",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ }
+ }
+ }
+ },
+ {
+ "comment": "drop db if exists main",
+ "query": "drop database if exists main",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop database if exists main",
+ "Instructions": {
+ "OperatorType": "DROPDB",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ }
+ }
+ }
+ },
+ {
+ "comment": "drop db if exists foo",
+ "query": "drop schema if exists foo",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop schema if exists foo",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "DDL with qualifier",
+ "query": "create index a on user.user(id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create index a on user.user(id)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add index a (id)"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "DDL with qualifier for a table not in vschema of an unsharded keyspace",
+ "query": "create index a on main.unknown(id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create index a on main.unknown(id)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "alter table unknown add index a (id)"
+ },
+ "TablesUsed": [
+ "main.unknown"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace",
+ "query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with IN clause",
+ "query": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with UNION clause",
+ "query": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id from unsharded union select id from unsharded_auto order by id asc limit 5"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with multiple UNION clauses",
+ "query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with subquery in unsharded keyspace with UNION clauses in subqueries",
+ "query": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "Alter View",
+ "query": "alter view user.user_extra as select* from user.user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter view user.user_extra as select* from user.user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter view user_extra as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "drop table without qualifier",
+ "query": "drop table unsharded_a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop table unsharded_a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "drop table unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "Drop view",
+ "query": "drop view main.a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop view main.a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "drop view a"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Truncate table with qualifier",
+ "query": "truncate user.user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "truncate user.user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "truncate table user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Rename table",
+ "query": "rename table a to main.b",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "rename table a to main.b",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "rename table a to b"
+ },
+ "TablesUsed": [
+ "main.a",
+ "main.b"
+ ]
+ }
+ },
+ {
+ "comment": "CREATE temp TABLE",
+ "query": "create temporary table a(id int)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create temporary table a(id int)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create temporary table a (\n\tid int\n)",
+ "TempTable": true
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "DROP temp TABLE",
+ "query": "drop temporary table a",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop temporary table a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "drop temporary table a",
+ "TempTable": true
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "create table with function as a default value",
+ "query": "create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create table function_default (\n\tx varchar(25) default (trim(' check '))\n)"
+ },
+ "TablesUsed": [
+ "main.function_default"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt b/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt
deleted file mode 100644
index 74b23a47c63..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.txt
+++ /dev/null
@@ -1,512 +0,0 @@
-# simple create table
-"create table t1(id bigint, primary key(id))"
-{
- "QueryType": "DDL",
- "Original": "create table t1(id bigint, primary key(id))",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
- },
- "TablesUsed": [
- "main.t1"
- ]
-}
-Gen4 plan same as above
-
-# simple create table with keyspace
-"create table user.t1(id bigint, primary key(id))"
-{
- "QueryType": "DDL",
- "Original": "create table user.t1(id bigint, primary key(id))",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create table t1 (\n\tid bigint,\n\tprimary key (id)\n)"
- },
- "TablesUsed": [
- "user.t1"
- ]
-}
-Gen4 plan same as above
-
-# DDL
-"create table a(id int)"
-{
- "QueryType": "DDL",
- "Original": "create table a(id int)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create table a (\n\tid int\n)"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# simple create table with table qualifier that does not exists
-"create table a.b(id int)"
-"Unknown database 'a' in vschema"
-Gen4 plan same as above
-
-#Alter table
-"alter table a ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table a ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "alter table a add column id int"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with qualifier
-"alter table user.user ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table user.user ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add column id int"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with qualifier and table not in vschema
-"alter table user.a ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table user.a ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table a add column id int"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with unknown qualifier
-"alter table a.b ADD id int"
-"Unknown database 'a' in vschema"
-Gen4 plan same as above
-
-# create db foo
-"create database foo"
-{
- "QueryType": "DDL",
- "Original": "create database foo",
- "Instructions": {
- "OperatorType": "CREATEDB",
- "Keyspace": {
- "Name": "foo",
- "Sharded": false
- }
- }
-}
-Gen4 plan same as above
-
-# create db main
-"create database main"
-"Can't create database 'main'; database exists"
-Gen4 plan same as above
-
-# create db if not exists main
-"create database if not exists main"
-{
- "QueryType": "DDL",
- "Original": "create database if not exists main",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# alter db foo
-"alter database foo collate utf8"
-"Can't alter database 'foo'; unknown database"
-Gen4 plan same as above
-
-# alter db main
-"alter database main collate utf8"
-"alter database is not supported"
-Gen4 plan same as above
-
-# drop db foo
-"drop database foo"
-"Can't drop database 'foo'; database doesn't exists"
-Gen4 plan same as above
-
-# drop db main
-"drop database main"
-{
- "QueryType": "DDL",
- "Original": "drop database main",
- "Instructions": {
- "OperatorType": "DROPDB",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- }
- }
-}
-Gen4 plan same as above
-
-# drop db if exists main
-"drop database if exists main"
-{
- "QueryType": "DDL",
- "Original": "drop database if exists main",
- "Instructions": {
- "OperatorType": "DROPDB",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- }
- }
-}
-Gen4 plan same as above
-
-# drop db if exists foo
-"drop schema if exists foo"
-{
- "QueryType": "DDL",
- "Original": "drop schema if exists foo",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# DDL with qualifier
-"create index a on user.user(id)"
-{
- "QueryType": "DDL",
- "Original": "create index a on user.user(id)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add index a (id)"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# DDL with qualifier for a table not in vschema of an unsharded keyspace
-"create index a on main.unknown(id)"
-{
- "QueryType": "DDL",
- "Original": "create index a on main.unknown(id)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "alter table unknown add index a (id)"
- },
- "TablesUsed": [
- "main.unknown"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace
-"create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-
-# create view with subquery in unsharded keyspace with IN clause
-"create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace with UNION clause
-"create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as (select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id from unsharded union select id from unsharded_auto order by id asc limit 5"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace with multiple UNION clauses
-"create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with subquery in unsharded keyspace with UNION clauses in subqueries
-"create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
-{
- "QueryType": "DDL",
- "Original": "create view view_a as (select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# Alter View
-"alter view user.user_extra as select* from user.user"
-{
- "QueryType": "DDL",
- "Original": "alter view user.user_extra as select* from user.user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter view user_extra as select * from `user`"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# drop table without qualifier
-"drop table unsharded_a"
-{
- "QueryType": "DDL",
- "Original": "drop table unsharded_a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "drop table unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded_a"
- ]
-}
-Gen4 plan same as above
-
-# Drop view
-"drop view main.a"
-{
- "QueryType": "DDL",
- "Original": "drop view main.a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "drop view a"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Truncate table with qualifier
-"truncate user.user_extra"
-{
- "QueryType": "DDL",
- "Original": "truncate user.user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "truncate table user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Rename table
-"rename table a to main.b"
-{
- "QueryType": "DDL",
- "Original": "rename table a to main.b",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "rename table a to b"
- },
- "TablesUsed": [
- "main.a",
- "main.b"
- ]
-}
-Gen4 plan same as above
-
-# CREATE temp TABLE
-"create temporary table a(id int)"
-{
- "QueryType": "DDL",
- "Original": "create temporary table a(id int)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create temporary table a (\n\tid int\n)",
- "TempTable": true
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# DROP temp TABLE
-"drop temporary table a"
-{
- "QueryType": "DDL",
- "Original": "drop temporary table a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "drop temporary table a",
- "TempTable": true
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# create table with function as a default value
-"create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))"
-{
- "QueryType": "DDL",
- "Original": "create table function_default (x varchar(25) DEFAULT (TRIM(' check ')))",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create table function_default (\n\tx varchar(25) default (trim(' check '))\n)"
- },
- "TablesUsed": [
- "main.function_default"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json
new file mode 100644
index 00000000000..fca52d487a0
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json
@@ -0,0 +1,789 @@
+[
+ {
+ "comment": "Create View with qualifier",
+ "query": "create view user.a as select* from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.a as select* from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view a as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with qualifier in select as well",
+ "query": "create view user.a as select* from user.user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.a as select* from user.user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view a as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with No column referenced",
+ "query": "create view user.view_a as select 1 from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select 1 from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select 1 from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with '*' expression for simple route",
+ "query": "create view user.view_a as select user.* from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.* from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.* from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with unqualified '*' expression for simple route",
+ "query": "create view user.view_a as select * from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with fully qualified '*' expression for simple route",
+ "query": "create view user.view_a as select user.user.* from user.user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.user.* from user.user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.* from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from authoritative table",
+ "query": "create view user.view_a as select * from authoritative",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from authoritative"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select user_id, col1, col2 from authoritative"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from join of authoritative tables",
+ "query": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from qualified authoritative table",
+ "query": "create view user.view_a as select a.* from authoritative a",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select a.* from authoritative as a"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select a.user_id, a.col1, a.col2 from authoritative as a"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select * from intermixing of authoritative table with non-authoritative results in no expansion",
+ "query": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from authoritative join `user` on authoritative.user_id = `user`.id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select authoritative.* with intermixing still expands",
+ "query": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "v3-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with auto-resolve anonymous columns for simple route",
+ "query": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with join that can be solved in each shard separately",
+ "query": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.id from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with last_insert_id for unsharded route",
+ "query": "create view main.view_a as select last_insert_id() as x from main.unsharded",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view main.view_a as select last_insert_id() as x from main.unsharded",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Query": "create view view_a as select :__lastInsertId as x from unsharded"
+ },
+ "TablesUsed": [
+ "main.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with select from pinned table",
+ "query": "create view user.view_a as select * from pin_test",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from pin_test",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from pin_test"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Expression with single-route reference",
+ "query": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.col, user_extra.id + user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Comments",
+ "query": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select /* comment */ `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with for update",
+ "query": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id for update"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Case preservation",
+ "query": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select `user`.Col, user_extra.Id from `user` join user_extra on `user`.id = user_extra.user_id"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with syntax error",
+ "query": "create view user.view_a as the quick brown fox",
+ "plan": "syntax error at position 31 near 'the'"
+ },
+ {
+ "comment": "create view with Hex number is not treated as a simple value",
+ "query": "create view user.view_a as select * from user where id = 0x04",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where id = 0x04",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 0x04"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with limit works if it can be dropped",
+ "query": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where `name` = 'abc' and id = 4 limit 5"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Multiple parenthesized expressions",
+ "query": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Multiple parenthesized expressions",
+ "query": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Column Aliasing with Table.Column",
+ "query": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col asc"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Column Aliasing with Column",
+ "query": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with Booleans and parenthesis",
+ "query": "create view user.view_a as select * from user where (id = 1) AND name = true",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from user where (id = 1) AND name = true",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from `user` where id = 1 and `name` = true"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with union with the same target shard",
+ "query": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from music where user_id = 1 union select * from `user` where id = 1"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with testing SingleRow Projection",
+ "query": "create view user.view_a as select 42 from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select 42 from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select 42 from `user`"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "create view with sql_calc_found_rows without limit",
+ "query": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "create view view_a as select * from music where user_id = 1"
+ },
+ "TablesUsed": [
+ "user.view_a"
+ ]
+ }
+ },
+ {
+ "comment": "DDL",
+ "query": "create index a on user(id)",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "create index a on user(id)",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add index a (id)"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Alter table with qualifier",
+ "query": "alter table user ADD id int",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter table user ADD id int",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter table `user` add column id int"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Alter View",
+ "query": "alter view user_extra as select* from user",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "alter view user_extra as select* from user",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "alter view user_extra as select * from `user`"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Alter View with unknown view",
+ "query": "alter view unknown as select* from user",
+ "plan": "keyspace not specified"
+ },
+ {
+ "comment": "drop table with qualifier in one",
+ "query": "drop table user.user, user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop table user.user, user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "drop table `user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "drop table with incompatible tables",
+ "query": "drop table user, unsharded_a",
+ "plan": "Tables or Views specified in the query do not belong to the same destination"
+ },
+ {
+ "comment": "drop table with unknown table",
+ "query": "drop table unknown",
+ "plan": "keyspace not specified"
+ },
+ {
+ "comment": "drop view with 1 view without qualifier",
+ "query": "drop view user.user, user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "drop view user.user, user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "drop view `user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "drop view with incompatible views",
+ "query": "drop view user, unsharded_a",
+ "plan": "Tables or Views specified in the query do not belong to the same destination"
+ },
+ {
+ "comment": "drop view with unknown view",
+ "query": "drop view unknown",
+ "plan": "keyspace not specified"
+ },
+ {
+ "comment": "Truncate table without qualifier",
+ "query": "truncate user_extra",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "truncate user_extra",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "truncate table user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Rename table",
+ "query": "rename table user_extra to b",
+ "plan": {
+ "QueryType": "DDL",
+ "Original": "rename table user_extra to b",
+ "Instructions": {
+ "OperatorType": "DDL",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Query": "rename table user_extra to b"
+ },
+ "TablesUsed": [
+ "user.b",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Rename table with different keyspace tables",
+ "query": "rename table user_extra to b, main.a to b",
+ "plan": "Tables or Views specified in the query do not belong to the same destination"
+ },
+ {
+ "comment": "Rename table with change in keyspace name",
+ "query": "rename table user_extra to main.b",
+ "plan": "Changing schema from 'user' to 'main' is not allowed"
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt
deleted file mode 100644
index 57f9de4003c..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.txt
+++ /dev/null
@@ -1,782 +0,0 @@
-# Create View with qualifier
-"create view user.a as select* from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.a as select* from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view a as select * from `user`"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-# create view with qualifier in select as well
-"create view user.a as select* from user.user"
-{
- "QueryType": "DDL",
- "Original": "create view user.a as select* from user.user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view a as select * from `user`"
- },
- "TablesUsed": [
- "user.a"
- ]
-}
-Gen4 plan same as above
-
-# create view with No column referenced
-"create view user.view_a as select 1 from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select 1 from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select 1 from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with '*' expression for simple route
-"create view user.view_a as select user.* from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.* from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.* from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with unqualified '*' expression for simple route
-"create view user.view_a as select * from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with fully qualified '*' expression for simple route
-"create view user.view_a as select user.user.* from user.user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.user.* from user.user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.* from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with select * from authoritative table
-"create view user.view_a as select * from authoritative"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from authoritative"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select user_id, col1, col2 from authoritative"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with select * from join of authoritative tables
-"create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with select * from qualified authoritative table
-"create view user.view_a as select a.* from authoritative a"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select a.* from authoritative as a"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select a.user_id, a.col1, a.col2 from authoritative as a"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with select * from intermixing of authoritative table with non-authoritative results in no expansion
-"create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from authoritative join user on authoritative.user_id=user.id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from authoritative join `user` on authoritative.user_id = `user`.id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with select authoritative.* with intermixing still expands
-"create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-
-# create view with auto-resolve anonymous columns for simple route
-"create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with join that can be solved in each shard separately
-"create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.id from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.id from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with last_insert_id for unsharded route
-"create view main.view_a as select last_insert_id() as x from main.unsharded"
-{
- "QueryType": "DDL",
- "Original": "create view main.view_a as select last_insert_id() as x from main.unsharded",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Query": "create view view_a as select :__lastInsertId as x from unsharded"
- },
- "TablesUsed": [
- "main.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with select from pinned table
-"create view user.view_a as select * from pin_test"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from pin_test",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from pin_test"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Expression with single-route reference
-"create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.col, user_extra.id + user_extra.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.col, user_extra.id + user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Comments
-"create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select /* comment */ user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select /* comment */ `user`.col from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with for update
-"create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.col from user join user_extra on user.id = user_extra.user_id for update",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id for update"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Case preservation
-"create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user.Col, user_extra.Id from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select `user`.Col, user_extra.Id from `user` join user_extra on `user`.id = user_extra.user_id"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with syntax error
-"create view user.view_a as the quick brown fox"
-"syntax error at position 31 near 'the'"
-Gen4 plan same as above
-
-# create view with Hex number is not treated as a simple value
-"create view user.view_a as select * from user where id = 0x04"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where id = 0x04",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 0x04"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with limit works if it can be dropped
-"create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where name ='abc' AND (id = 4) limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where `name` = 'abc' and id = 4 limit 5"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Multiple parenthesized expressions
-"create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where (id = 4) AND (name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Multiple parenthesized expressions
-"create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where (id = 4 and name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 4 and `name` = 'abc' limit 5"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Column Aliasing with Table.Column
-"create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col asc"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Column Aliasing with Column
-"create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with Booleans and parenthesis
-"create view user.view_a as select * from user where (id = 1) AND name = true"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from user where (id = 1) AND name = true",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from `user` where id = 1 and `name` = true"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with union with the same target shard
-"create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select * from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from music where user_id = 1 union select * from `user` where id = 1"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with testing SingleRow Projection
-"create view user.view_a as select 42 from user"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select 42 from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select 42 from `user`"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# create view with sql_calc_found_rows without limit
-"create view user.view_a as select sql_calc_found_rows * from music where user_id = 1"
-{
- "QueryType": "DDL",
- "Original": "create view user.view_a as select sql_calc_found_rows * from music where user_id = 1",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "create view view_a as select * from music where user_id = 1"
- },
- "TablesUsed": [
- "user.view_a"
- ]
-}
-Gen4 plan same as above
-
-# DDL
-"create index a on user(id)"
-{
- "QueryType": "DDL",
- "Original": "create index a on user(id)",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add index a (id)"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-#Alter table with qualifier
-"alter table user ADD id int"
-{
- "QueryType": "DDL",
- "Original": "alter table user ADD id int",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter table `user` add column id int"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Alter View
-"alter view user_extra as select* from user"
-{
- "QueryType": "DDL",
- "Original": "alter view user_extra as select* from user",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "alter view user_extra as select * from `user`"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Alter View with unknown view
-"alter view unknown as select* from user"
-"keyspace not specified"
-Gen4 plan same as above
-
-# drop table with qualifier in one
-"drop table user.user, user_extra"
-{
- "QueryType": "DDL",
- "Original": "drop table user.user, user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "drop table `user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# drop table with incompatible tables
-"drop table user, unsharded_a"
-"Tables or Views specified in the query do not belong to the same destination"
-Gen4 plan same as above
-
-# drop table with unknown table
-"drop table unknown"
-"keyspace not specified"
-Gen4 plan same as above
-
-# drop view with 1 view without qualifier
-"drop view user.user, user_extra"
-{
- "QueryType": "DDL",
- "Original": "drop view user.user, user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "drop view `user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# drop view with incompatible views
-"drop view user, unsharded_a"
-"Tables or Views specified in the query do not belong to the same destination"
-Gen4 plan same as above
-
-# drop view with unknown view
-"drop view unknown"
-"keyspace not specified"
-Gen4 plan same as above
-
-# Truncate table without qualifier
-"truncate user_extra"
-{
- "QueryType": "DDL",
- "Original": "truncate user_extra",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "truncate table user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Rename table
-"rename table user_extra to b"
-{
- "QueryType": "DDL",
- "Original": "rename table user_extra to b",
- "Instructions": {
- "OperatorType": "DDL",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Query": "rename table user_extra to b"
- },
- "TablesUsed": [
- "user.b",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Rename table with different keyspace tables
-"rename table user_extra to b, main.a to b"
-"Tables or Views specified in the query do not belong to the same destination"
-Gen4 plan same as above
-
-# Rename table with change in keyspace name
-"rename table user_extra to main.b"
-"Changing schema from 'user' to 'main' is not allowed"
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json
new file mode 100644
index 00000000000..0e82da1d592
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json
@@ -0,0 +1,6245 @@
+[
+ {
+ "comment": "update table not found",
+ "query": "update nouser set val = 1",
+ "plan": "table nouser not found"
+ },
+ {
+ "comment": "delete table not found",
+ "query": "delete from nouser",
+ "plan": "table nouser not found"
+ },
+ {
+ "comment": "explicit keyspace reference",
+ "query": "update main.m1 set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update main.m1 set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update m1 set val = 1",
+ "Table": "m1"
+ },
+ "TablesUsed": [
+ "main.m1"
+ ]
+ }
+ },
+ {
+ "comment": "update unsharded",
+ "query": "update unsharded set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set val = 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "subqueries in unsharded update",
+ "query": "update unsharded set col = (select col from unsharded limit 1)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select col from unsharded limit 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select col from unsharded limit 1)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union in subquery of unsharded update",
+ "query": "update unsharded set col = (select id from unsharded union select id from unsharded)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded union select id from unsharded)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded union select id from unsharded)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded join in subquery of unsharded update",
+ "query": "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded as a join unsharded as b on a.id = b.id)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "update with join subquery",
+ "query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+    "comment": "routing rules: update of a routed table",
+ "query": "update route1 set a=1 where id=1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update route1 set a=1 where id=1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as route1 set a = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update route1 set a=1 where id=1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as route1 set a = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update: routing rules for subquery.",
+ "query": "update unsharded_a set a=(select a from route2)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a set a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a set a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a set a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a set a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "delete unsharded",
+ "query": "delete from unsharded",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "delete from sequence",
+ "query": "DELETE FROM seq",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM seq",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from seq",
+ "Table": "seq"
+ },
+ "TablesUsed": [
+ "main.seq"
+ ]
+ }
+ },
+ {
+ "comment": "delete from reference table in unsharded keyspace",
+ "query": "DELETE FROM unsharded_ref",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM unsharded_ref",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded_ref",
+ "Table": "unsharded_ref"
+ },
+ "TablesUsed": [
+ "main.unsharded_ref"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id",
+ "query": "update user set val = 1 where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id with alias",
+ "query": "update user as user_alias set val = 1 where user_alias.id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id with parenthesized expression",
+ "query": "update user set val = 1 where (id = 1)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id with multi-part where clause with parens",
+ "query": "update user set val = 1 where (name = 'foo' and id = 1)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, changing one vindex column",
+ "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, changing same vindex twice",
+ "query": "update user_metadata set email = 'a', email = 'b' where user_id = 1",
+ "plan": "column has duplicate set values: 'email'"
+ },
+ {
+ "comment": "update by primary keyspace id, changing multiple vindex columns",
+ "query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "address_user_map:4",
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "address_user_map:4",
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, changing one vindex column, using order by and limit",
+ "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "email_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
+ "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
+ "Table": "user_metadata",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "update changes non owned vindex column",
+ "query": "update music_extra set music_id = 1 where user_id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music_extra set music_id = 1 where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "music_user_map:1"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
+ "Query": "update music_extra set music_id = 1 where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music_extra set music_id = 1 where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "music_user_map:1"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
+ "Query": "update music_extra set music_id = 1 where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, stray where clause",
+ "query": "update user set val = 1 where id = id2 and id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = id2 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = id2 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = id2 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = id2 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update by primary keyspace id, stray where clause with conversion error",
+ "query": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from by primary keyspace id",
+ "query": "delete from user where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
+ "Query": "delete from `user` where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
+ "Query": "delete from `user` where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi-table delete with comma join",
+ "query": "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete a from unsharded_a as a, unsharded_b as b where a.id = b.id and b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "multi-table delete with ansi join",
+ "query": "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete a from unsharded_a as a join unsharded_b as b on a.id = b.id where b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "delete with join from subquery",
+ "query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+    "comment": "routing rules: delete from a routed table",
+ "query": "delete from route1 where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
+ "Query": "delete from `user` as route1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
+ "Query": "delete from `user` as route1 where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete: routing rules for subquery",
+ "query": "delete from unsharded_a where a=(select a from route2)",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded_a where a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded_a"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded_a where a=(select a from route2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "update by lookup",
+ "query": "update music set val = 1 where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update music set val = 1 where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update music set val = 1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update music set val = 1 where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "update multi-table ansi join",
+ "query": "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a as a join unsharded_b as b on a.id = b.id set a.val = 'foo' where b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "update multi-table comma join",
+ "query": "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded_a as a, unsharded_b as b set a.val = 'foo' where a.id = b.id and b.val = 1",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "delete from by lookup",
+ "query": "delete from music where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "delete from, no owned vindexes",
+ "query": "delete from music_extra where user_id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music_extra where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from music_extra where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from music_extra where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from music_extra where user_id = 1",
+ "Table": "music_extra",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ }
+ },
+ {
+ "comment": "simple insert, no values",
+ "query": "insert into unsharded values()",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values()",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values ()",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "simple insert unsharded",
+ "query": "insert into unsharded values(1, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values(1, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (1, 2)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "simple upsert unsharded",
+ "query": "insert into unsharded values(1, 2) on duplicate key update x = 3",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values(1, 2) on duplicate key update x = 3",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (1, 2) on duplicate key update x = 3",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert, no col list with auto-inc and authoritative column list",
+ "query": "insert into unsharded_authoritative values(1,1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_authoritative values(1,1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_authoritative(col1, col2) values (:__seq0, 1)",
+ "TableName": "unsharded_authoritative"
+ },
+ "TablesUsed": [
+ "main.unsharded_authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "sharded upsert with sharding key set to vindex column",
+ "query": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0) on duplicate key update user_id = values(user_id)",
+ "TableName": "music",
+ "VindexValues": {
+ "music_user_map": "INT64(2)",
+ "user_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sharded bulk upsert with sharding key set to vindex column",
+ "query": "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0), (:_user_id_1, :_id_1) on duplicate key update user_id = values(user_id)",
+ "TableName": "music",
+ "VindexValues": {
+ "music_user_map": "INT64(2), INT64(4)",
+ "user_index": "INT64(1), INT64(3)"
+ }
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded with select",
+ "query": "insert into unsharded select id from unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded with select with join",
+ "query": "insert into unsharded select id from unsharded join unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded join unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select id from unsharded join unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, invalid value for auto-inc",
+ "query": "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, column present",
+ "query": "insert into unsharded_auto(id, val) values(1, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(id, val) values(1, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, column absent",
+ "query": "insert into unsharded_auto(val) values('aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(val) values('aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(val, id) values ('aa', :__seq0)",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, column absent",
+ "query": "insert into unsharded_auto(val) values(false)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(val) values(false)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(val, id) values (false, :__seq0)",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "insert unsharded, multi-val",
+ "query": "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert subquery in insert value",
+ "query": "insert into unsharded values((select 1 from dual), 1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values((select 1 from dual), 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (1, 1)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "sharded insert subquery in insert value",
+ "query": "insert into user(id, val) values((select 1), 1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id, val) values((select 1), 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert into a routed table",
+ "query": "insert into route1(id) values (1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into route1(id) values (1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with mismatched column list",
+ "query": "insert into user(id) values (1, 2)",
+ "plan": "column list doesn't match values"
+ },
+ {
+ "comment": "insert no column list for sharded authoritative table",
+ "query": "insert into authoritative values(1, 2, 3)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into authoritative values(1, 2, 3)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, 2, 3)",
+ "TableName": "authoritative",
+ "VindexValues": {
+ "user_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "insert sharded, no values",
+ "query": "insert into user values()",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user values()",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with one vindex",
+ "query": "insert into user(id) values (1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values (1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert ignore sharded",
+ "query": "insert ignore into user(id) values (1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert ignore into user(id) values (1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert ignore into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert on duplicate key",
+ "query": "insert into user(id) values(1) on duplicate key update col = 2",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values(1) on duplicate key update col = 2",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "InsertIgnore": true,
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with one vindex and bind var",
+ "query": "insert into user(id) values (:aa)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values (:aa)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with non vindex",
+ "query": "insert into user(nonid) values (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid) values (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with default seq",
+ "query": "insert into user(id, nonid) values (default, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id, nonid) values (default, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with non vindex bool value",
+ "query": "insert into user(nonid) values (true)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid) values (true)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "NULL",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with all vindexes supplied",
+ "query": "insert into user(nonid, name, id) values (2, 'foo', 1)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(nonid, name, id) values (2, 'foo', 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL",
+ "name_user_map": "VARCHAR(\"foo\")",
+ "user_index": ":__seq0"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert for non-vindex autoinc",
+ "query": "insert into user_extra(nonid) values (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(nonid) values (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
+ "TableName": "user_extra",
+ "VindexValues": {
+ "user_index": "NULL"
+ }
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert for non-compliant names",
+ "query": "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `weird``name`(`a``b*c`, `b*c`) values (:_a_b_c_0, 2)",
+ "TableName": "weird`name",
+ "VindexValues": {
+ "user_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.weird`name"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert from union",
+ "query": "insert into unsharded select 1 from dual union select 1 from dual",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select 1 from dual union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded select 1 from dual union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "insert for non-vindex autoinc, invalid value",
+ "query": "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
+ "TableName": "user_extra",
+ "VindexValues": {
+ "user_index": "NULL"
+ }
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert invalid index value",
+ "query": "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into music_extra(music_id, user_id) values (:_music_id_0, :_user_id_0)",
+ "TableName": "music_extra",
+ "VindexValues": {
+ "music_user_map": "INT64(1)",
+ "user_index": "DECIMAL(18446744073709551616)"
+ }
+ },
+ "TablesUsed": [
+ "user.music_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert invalid index value",
+ "query": "insert into music_extra(music_id, user_id) values(1, id)",
+ "plan": "column access not supported here"
+ },
+ {
+ "comment": "insert invalid table",
+ "query": "insert into noexist(music_id, user_id) values(1, 18446744073709551616)",
+ "plan": "table noexist not found"
+ },
+ {
+ "comment": "insert with multiple rows",
+ "query": "insert into user(id) values (1), (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) values (1), (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL, NULL",
+ "name_user_map": "NULL, NULL",
+ "user_index": ":__seq0, :__seq1"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with query timeout",
+ "query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
+ "QueryTimeout": 1,
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL, NULL",
+ "name_user_map": "NULL, NULL",
+ "user_index": ":__seq0, :__seq1"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert with multiple rows - multi-shard autocommit",
+ "query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": true,
+ "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
+ "TableName": "user",
+ "VindexValues": {
+ "costly_map": "NULL, NULL",
+ "name_user_map": "NULL, NULL",
+ "user_index": ":__seq0, :__seq1"
+ }
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert into a vindex not allowed",
+ "query": "insert into user_index(id) values(1)",
+ "plan": "unsupported: multi-shard or vindex write statement"
+ },
+ {
+ "comment": "simple replace unsharded",
+ "query": "replace into unsharded values(1, 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded values(1, 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded values (1, 2)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded with select",
+ "query": "replace into unsharded select id from unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded select id from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded select id from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, invalid value for auto-inc",
+ "query": "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, column present",
+ "query": "replace into unsharded_auto(id, val) values(1, 'aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(id, val) values(1, 'aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, column absent",
+ "query": "replace into unsharded_auto(val) values('aa')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(val) values('aa')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(val, id) values ('aa', :__seq0)",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace unsharded, multi-val",
+ "query": "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
+ "TableName": "unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "replace invalid table",
+ "query": "replace into noexist(music_id, user_id) values(1, 18446744073709551616)",
+ "plan": "table noexist not found"
+ },
+ {
+ "comment": "insert a row in a multi column vindex table",
+ "query": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0)",
+ "TableName": "multicolvin",
+ "VindexValues": {
+ "cola_map": "INT64(1)",
+ "colb_colc_map": "INT64(2), INT64(3)",
+ "kid_index": "INT64(4)"
+ }
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "insert for overlapped vindex columns",
+ "query": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into overlap_vindex(kid, column_a, column_b) values (:_kid_0, :_column_a_0, 3)",
+ "TableName": "overlap_vindex",
+ "VindexValues": {
+ "cola_kid_map": "INT64(2), INT64(1)",
+ "kid_index": "INT64(1)"
+ }
+ },
+ "TablesUsed": [
+ "user.overlap_vindex"
+ ]
+ }
+ },
+ {
+ "comment": "insert multiple rows in a multi column vindex table",
+ "query": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0), (:_column_a_1, :_column_b_1, :_column_c_1, :_kid_1)",
+ "TableName": "multicolvin",
+ "VindexValues": {
+ "cola_map": "INT64(1), INT64(5)",
+ "colb_colc_map": "INT64(2), INT64(6), INT64(3), INT64(7)",
+ "kid_index": "INT64(4), INT64(8)"
+ }
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "delete row in a multi column vindex table",
+ "query": "delete from multicolvin where kid=1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicolvin where kid=1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
+ "Query": "delete from multicolvin where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicolvin where kid=1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
+ "Query": "delete from multicolvin where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update columns of multi column vindex",
+ "query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update multiple vindexes, with multi column vindex",
+ "query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "cola_map:4",
+ "colb_colc_map:5"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "cola_map:4",
+ "colb_colc_map:5"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update with no primary vindex on where clause (scatter update)",
+ "query": "update user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with target destination",
+ "query": "update `user[-]`.user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update `user[-]`.user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with no primary vindex on where clause (scatter update) - multi shard autocommit",
+ "query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": true,
+ "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with no primary vindex on where clause (scatter update) - query timeout",
+ "query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
+ "QueryTimeout": 1,
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with non-comparison expr",
+ "query": "update user_extra set val = 1 where id between 1 and 2",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where id between 1 and 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where id between 1 and 2",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with primary id through IN clause",
+ "query": "update user_extra set val = 1 where user_id in (1, 2)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where user_id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where user_id in (1, 2)",
+ "Table": "user_extra",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with non-unique key",
+ "query": "update user_extra set val = 1 where name = 'foo'",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where `name` = 'foo'",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update by lookup with IN clause",
+ "query": "update user_extra set val = 1 where id in (1, 2)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where id in (1, 2)",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "update with where clause with parens",
+ "query": "update user_extra set val = 1 where (name = 'foo' or id = 1)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user_extra set val = 1 where (name = 'foo' or id = 1)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1 where `name` = 'foo' or id = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no where clause",
+ "query": "delete from user_extra",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete with target destination",
+ "query": "delete from `user[-]`.user_extra",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from `user[-]`.user_extra",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete with non-comparison expr",
+ "query": "delete from user_extra where user_id between 1 and 2",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra where user_id between 1 and 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra where user_id between 1 and 2",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no index match",
+ "query": "delete from user_extra where name = 'jose'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra where name = 'jose'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra where `name` = 'jose'",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no index match - multi shard autocommit",
+ "query": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": true,
+ "Query": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where `name` = 'jose'",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with no index match - query timeout",
+ "query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where `name` = 'jose'",
+ "QueryTimeout": 1,
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "delete from with primary id in through IN clause",
+ "query": "delete from user_extra where user_id in (1, 2)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user_extra where user_id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from user_extra where user_id in (1, 2)",
+ "Table": "user_extra",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded update where inner query references outer query",
+ "query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Table": "unsharded, unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
+ "Table": "unsharded, unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded delete where inner query references outer query",
+ "query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null",
+ "query": "update user set name = null where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = null where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = null where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using last_insert_id",
+ "query": "insert into unsharded values(last_insert_id(), 2)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded values(last_insert_id(), 2)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded values (:__lastInsertId, 2)",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null with multiple primary keyspace id",
+ "query": "update user set name = null where id in (1, 2, 3)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id in (1, 2, 3)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update",
+ "Query": "update `user` set `name` = null where id in (1, 2, 3)",
+ "Table": "user",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null without a where clause",
+ "query": "update user set name = null",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` for update",
+ "Query": "update `user` set `name` = null",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update vindex value to null with complex where clause",
+ "query": "update user set name = null where id + 1 = 2",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = null where id + 1 = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id + 1 = 2 for update",
+ "Query": "update `user` set `name` = null where id + 1 = 2",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from user by primary keyspace id with in clause",
+ "query": "delete from user where id in (1, 2, 3)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id in (1, 2, 3)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update",
+ "Query": "delete from `user` where id in (1, 2, 3)",
+ "Table": "user",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from user by complex expression",
+ "query": "delete from user where id + 1 = 2",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where id + 1 = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id + 1 = 2 for update",
+ "Query": "delete from `user` where id + 1 = 2",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete from user without a where clause",
+ "query": "delete from user",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with single table targets",
+ "query": "delete music from music where id = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete music from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete music from music where id = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
+ "Query": "delete from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "scatter update table with owned vindexes without changing lookup vindex",
+ "query": "update user set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set val = 1",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter delete with owned lookup vindex",
+ "query": "delete from user",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update multi column vindex, without values for all the vindex columns",
+ "query": "update multicolvin set column_c = 2 where kid = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicolvin set column_c = 2 where kid = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colb_colc_map:4"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "kid_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
+ "Query": "update multicolvin set column_c = 2 where kid = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "kid_index"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "update with binary value",
+ "query": "update user set name = _binary 'abc' where id = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = _binary 'abc' where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = _binary 'abc' where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set name = _binary 'abc' where id = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
+ "Query": "update `user` set `name` = _binary 'abc' where id = 1",
+ "Table": "user",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with binary value",
+ "query": "delete from user where name = _binary 'abc'",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where name = _binary 'abc'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
+ "Query": "delete from `user` where `name` = _binary 'abc'",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where name = _binary 'abc'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
+ "Query": "delete from `user` where `name` = _binary 'abc'",
+ "Table": "user",
+ "Values": [
+ "VARBINARY(\"abc\")"
+ ],
+ "Vindex": "name_user_map"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with shard targeting",
+ "query": "delete from `user[-]`.user",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from `user[-]`.user",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update with shard targeting",
+ "query": "update `user[-]`.user set name = 'myname'",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update `user[-]`.user set name = 'myname'",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from `user` for update",
+ "Query": "update `user` set `name` = 'myname'",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update with shard targeting without vindex",
+ "query": "update `user[-]`.user_extra set val = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update `user[-]`.user_extra set val = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update user_extra set val = 1",
+ "Table": "user_extra"
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "multi-table delete with single table",
+ "query": "delete u.* from user u where u.id * u.col = u.foo",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete u.* from user u where u.id * u.col = u.foo",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id * `user`.col = `user`.foo for update",
+ "Query": "delete from `user` where `user`.id * `user`.col = `user`.foo",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with unknown reference",
+ "query": "delete music from user where id = 1",
+ "plan": "Unknown table 'music' in MULTI DELETE"
+ },
+ {
+ "comment": "delete with derived tables",
+ "query": "delete music from (select * from user) music where id = 1",
+ "plan": "The target table music of the DELETE is not updatable"
+ },
+ {
+ "comment": "delete with derived tables with unknown table",
+ "query": "delete user from (select * from user) music where id = 1",
+ "plan": "Unknown table 'user' in MULTI DELETE"
+ },
+ {
+ "query": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update",
+ "TableName": "user_privacy_consents"
+ },
+ "TablesUsed": [
+ "main.user_privacy_consents"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update",
+ "TableName": "user_privacy_consents"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "main.user_privacy_consents"
+ ]
+ }
+ },
+ {
+ "comment": "Delete on backfilling unique lookup vindex should be a scatter",
+ "query": "delete from zlookup_unique.t1 where c2 = 20",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 20 for update",
+ "Query": "delete from t1 where c2 = 20",
+ "Table": "t1"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Update on backfilling unique lookup vindex should be a scatter",
+ "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 20",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 20 for update",
+ "Query": "update t1 set c2 = 1 where c2 = 20",
+ "Table": "t1"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Delete on backfilling and non-backfilling unique lookup vindexes should be a delete equal",
+ "query": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "delete from t1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "delete from t1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Update on backfilling and non-backfilling unique lookup vindexes should be an equal",
+ "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
+ "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
+ "Table": "t1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Delete EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be a delete IN",
+ "query": "delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 in (20, 21) for update",
+ "Query": "delete from t1 where c2 = 10 and c3 in (20, 21)",
+ "Table": "t1",
+ "Values": [
+ "(INT64(20), INT64(21))"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "Update EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be an update IN",
+ "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "lookup_t1:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "xxhash",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 in (20, 21) for update",
+ "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
+ "Table": "t1",
+ "Values": [
+ "(INT64(20), INT64(21))"
+ ],
+ "Vindex": "lookup_t1_2"
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "update with alias table",
+ "query": "update user u set u.name = 'john' where u.col > 20",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user u set u.name = 'john' where u.col > 20",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_user_map:3"
+ ],
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly, u.`name` = 'john' from `user` as u where u.col > 20 for update",
+ "Query": "update `user` as u set u.`name` = 'john' where u.col > 20",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with alias table",
+ "query": "delete from user u where u.col > 20",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user u where u.col > 20",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.col > 20 for update",
+ "Query": "delete from `user` as u where u.col > 20",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex",
+ "query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex - reverse order",
+ "query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex using an IN clause",
+ "query": "update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with a multicol vindex using IN clauses on both vindex columns",
+ "query": "update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola in (3, 4)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(3), INT64(4))",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex",
+ "query": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex - reverse order",
+ "query": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
+ "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
+ "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex using an IN clause",
+ "query": "delete from multicol_tbl where colb IN (1,2) and cola = 1",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb IN (1,2) and cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola = 1 for update",
+ "Query": "delete from multicol_tbl where colb in (1, 2) and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with a multicol vindex using IN clauses on both vindex columns",
+ "query": "delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola in (3, 4) for update",
+ "Query": "delete from multicol_tbl where colb in (1, 2) and cola in (3, 4)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(3), INT64(4))",
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with multicol and an owned vindex which changes",
+ "query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colc_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "colc_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
+ "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using non-unique lookup vindex",
+ "query": "update multicol_tbl set x = 42 where name = 'foo'",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 42 where name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 42 where `name` = 'foo'",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column",
+ "query": "update multicol_tbl set x = 42 where cola = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 42 where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 42 where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 42 where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 42 where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column on lookup vindex",
+ "query": "update multicol_tbl set name = 'bar' where cola = 1",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set name = 'bar' where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_muticoltbl_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
+ "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set name = 'bar' where cola = 1",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_muticoltbl_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
+ "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using subsharding column with in query",
+ "query": "update multicol_tbl set name = 'bar' where cola in (1,2)",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set name = 'bar' where cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "ChangedVindexValues": [
+ "name_muticoltbl_map:4"
+ ],
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola in (1, 2) for update",
+ "Query": "update multicol_tbl set `name` = 'bar' where cola in (1, 2)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing choosing between subsharding column and lookup vindex based on cost",
+ "query": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using non-unique lookup vindex",
+ "query": "delete from multicol_tbl where name = 'foo'",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' for update",
+ "Query": "delete from multicol_tbl where `name` = 'foo'",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using subsharding column",
+ "query": "delete from multicol_tbl where cola = 1",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
+ "Query": "delete from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
+ "Query": "delete from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using subsharding column with in query",
+ "query": "delete from multicol_tbl where cola in (1,2)",
+ "plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola in (1, 2) for update",
+ "Query": "delete from multicol_tbl where cola in (1, 2)",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using subsharding column with in query as lower cost over lookup vindex",
+ "query": "delete from multicol_tbl where name = 'foo' and cola = 2",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
+ "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 2,
+ "KsidVindex": "multicolIdx",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
+ "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "VARCHAR(\"foo\")"
+ ],
+ "Vindex": "name_muticoltbl_map"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with simple table.",
+ "query": "insert into music(id, user_id) select * from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(id, user_id) select * from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "music",
+ "VindexOffsetFromSelect": {
+ "music_user_map": "[0]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into music(id, user_id) select * from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "music",
+ "VindexOffsetFromSelect": {
+ "music_user_map": "[0]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with more columns in insert",
+ "query": "insert into music(id, user_id) select 1",
+ "plan": "Column count doesn't match value count at row 1"
+ },
+ {
+ "comment": "insert using select with more columns in select",
+ "query": "insert into music(id, user_id) select id, count(user_id), sum(user_id) from user group by id",
+ "plan": "Column count doesn't match value count at row 1"
+ },
+ {
+ "comment": "insert using select with more columns in select after accounting for star column",
+ "query": "insert into music(id, user_id) select id, *, 2 from user",
+ "plan": "Column count doesn't match value count at row 1"
+ },
+ {
+ "comment": "insert using select with auto-inc column using vitess sequence, sequence column not present",
+ "query": "insert into user_extra(user_id) select id from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id) select id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id) select id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with auto-inc column using vitess sequence, sequence column present",
+ "query": "insert into user_extra(id, user_id) select null, id from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(id, user_id) select null, id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select null, id from `user` where 1 != 1",
+ "Query": "select null, id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(id, user_id) select null, id from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select null, id from `user` where 1 != 1",
+ "Query": "select null, id from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded insert from select",
+ "query": "insert into user(id) select 1 from dual",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:0",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(id) select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:0",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with sharding column is autoinc and not present in the insert column query",
+ "query": "insert into user(pattern) SELECT 1",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(pattern) SELECT 1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user(pattern) SELECT 1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:1",
+ "MultiShardAutocommit": false,
+ "TableName": "user",
+ "VindexOffsetFromSelect": {
+ "costly_map": "[-1]",
+ "name_user_map": "[-1]",
+ "user_index": "[1]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert using select with sharding column is not an autoinc and not present in the insert column query",
+ "query": "insert into user_extra(pattern) SELECT 1",
+ "plan": "insert query does not have sharding column 'user_id' in the column list"
+ },
+ {
+ "comment": "sharded same keyspace",
+ "query": "insert into user_extra(user_id, col) select col1, col2 from user",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1",
+ "Query": "select col1, col2 from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1",
+ "Query": "select col1, col2 from `user` for update",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded same keyspace",
+ "query": "insert into unsharded(col) select col from unsharded_auto",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded(col) select col from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_auto",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into unsharded(col) select col from unsharded_auto for update",
+ "TableName": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "sharded different keyspace",
+ "query": "insert into user_extra(user_id, col) select col1, col2 from t1",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from t1 where 1 != 1",
+ "Query": "select col1, col2 from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from t1 where 1 != 1",
+ "Query": "select col1, col2 from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra",
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "sharded insert table, unsharded select table",
+ "query": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
+ "Query": "select col1, col2 from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user_extra"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Select",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "AutoIncrement": "main:2",
+ "MultiShardAutocommit": false,
+ "TableName": "user_extra",
+ "VindexOffsetFromSelect": {
+ "user_index": "[0]"
+ },
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
+ "Query": "select col1, col2 from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main_2.unsharded_tab",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded different keyspace",
+ "query": "insert into unsharded(col) select col from unsharded_tab",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded_tab where 1 != 1",
+ "Query": "select col from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded_tab where 1 != 1",
+ "Query": "select col from unsharded_tab for update",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main_2.unsharded_tab"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded insert table, sharded select table",
+ "query": "insert into unsharded(col) select col from t1",
+ "v3-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from t1 where 1 != 1",
+ "Query": "select col from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into unsharded(col) select col from t1",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "TableName": "unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from t1 where 1 != 1",
+ "Query": "select col from t1 for update",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded subquery in sharded update, not the same keyspace between outer and inner",
+ "query": "update user set col = (select id from unsharded)",
+ "v3-plan": "unsupported: subqueries in sharded DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select id from unsharded)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded lock in share mode",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = :__sq1",
+ "Table": "user"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded subquery in unsharded update, not the same keyspace",
+ "query": "update unsharded set col = (select id from user)",
+ "v3-plan": "unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` lock in share mode",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = :__sq1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join unsharded subqueries in unsharded update",
+ "query": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)",
+ "v3-plan": "unsupported: sharded subqueries in DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "unsharded_id": 0
+ },
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded lock in share mode",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = :unsharded_id lock in share mode",
+ "Table": "`user`",
+ "Values": [
+ ":unsharded_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update unsharded set col = :__sq1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded update with sub query where the sources can be merged into a single query",
+ "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
+ "v3-plan": "unsupported: subqueries in sharded DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
+ "Table": "user",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "merge through correlated subquery",
+ "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5",
+ "v3-plan": "unsupported: subqueries in sharded DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id = 5",
+ "Table": "user",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "merge through correlated subquery #2",
+ "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5",
+ "v3-plan": "unsupported: subqueries in sharded DML",
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id > 5",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "all defaults empty column, empty values",
+ "query": "insert into authoritative () values ()",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "insert into authoritative () values ()",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into authoritative(user_id) values (:_user_id_0)",
+ "TableName": "authoritative",
+ "VindexValues": {
+ "user_index": "NULL"
+ }
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "explain dml without any directive should fail",
+ "query": "explain format=vtexplain delete from user",
+ "plan": "explain format = vtexplain will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `explain /*vt+ EXECUTE_DML_QUERIES */ format = vtexplain delete from t1`"
+ },
+ {
+ "comment": "explain dml with actually_run_query directive",
+ "query": "explain /*vt+ execute_dml_queries */ format=vtexplain delete from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain /*vt+ execute_dml_queries */ format=vtexplain delete from user",
+ "Instructions": {
+ "OperatorType": "VTEXPLAIN",
+ "Inputs": [
+ {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "explain dml with actually_run_query directive - 2",
+ "query": "explain /*vt+ eXECUTE_DML_QUERIES */ format=vtexplain delete from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain /*vt+ eXECUTE_DML_QUERIES */ format=vtexplain delete from user",
+ "Instructions": {
+ "OperatorType": "VTEXPLAIN",
+ "Inputs": [
+ {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
+ "Query": "delete from `user`",
+ "Table": "user"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Here V3 populates the TablesUsed incorrectly\n# delete with join from multi table join subquery.",
+ "query": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded, unsharded, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded, unsharded_b"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000",
+ "Table": "unsharded, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "update with routing using multi column vindex",
+ "query": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "delete with routing using multi column vindex",
+ "query": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
+ "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "KsidLength": 1,
+ "KsidVindex": "user_index",
+ "MultiShardAutocommit": false,
+ "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
+ "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "user",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "update using last_insert_id with an argument",
+ "query": "update main.m1 set foo = last_insert_id(foo+1) where id = 12345",
+ "plan": {
+ "QueryType": "UPDATE",
+ "Original": "update main.m1 set foo = last_insert_id(foo+1) where id = 12345",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update m1 set foo = last_insert_id(foo + 1) where id = 12345",
+ "Table": "m1"
+ },
+ "TablesUsed": [
+ "main.m1"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt b/go/vt/vtgate/planbuilder/testdata/dml_cases.txt
deleted file mode 100644
index d141cd661ad..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/dml_cases.txt
+++ /dev/null
@@ -1,6090 +0,0 @@
-# update table not found
-"update nouser set val = 1"
-"table nouser not found"
-Gen4 plan same as above
-
-# delete table not found
-"delete from nouser"
-"table nouser not found"
-Gen4 plan same as above
-
-# explicit keyspace reference
-"update main.m1 set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update main.m1 set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update m1 set val = 1",
- "Table": "m1"
- },
- "TablesUsed": [
- "main.m1"
- ]
-}
-Gen4 plan same as above
-
-# update unsharded
-"update unsharded set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set val = 1",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# subqueries in unsharded update
-"update unsharded set col = (select col from unsharded limit 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select col from unsharded limit 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select col from unsharded limit 1)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# unsharded union in subquery of unsharded update
-"update unsharded set col = (select id from unsharded union select id from unsharded)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded union select id from unsharded)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded union select id from unsharded)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# unsharded join in subquery of unsharded update
-"update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded a join unsharded b on a.id = b.id)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded as a join unsharded as b on a.id = b.id)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# update with join subquery
-"update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# routing rules: updated of a routed table
-"update route1 set a=1 where id=1"
-{
- "QueryType": "UPDATE",
- "Original": "update route1 set a=1 where id=1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as route1 set a = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update route1 set a=1 where id=1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as route1 set a = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update: routing rules for subquery.
-"update unsharded_a set a=(select a from route2)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a set a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a set a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded_a"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a set a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a set a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# delete unsharded
-"delete from unsharded"
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# update by primary keyspace id
-"update user set val = 1 where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id with alias
-"update user as user_alias set val = 1 where user_alias.id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user as user_alias set val = 1 where user_alias.id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id with parenthesized expression
-"update user set val = 1 where (id = 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id with multi-part where clause with parens
-"update user set val = 1 where (name = 'foo' and id = 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where (name = 'foo' and id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id, changing one vindex column
-"update user_metadata set email = 'juan@vitess.io' where user_id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-
-# update by primary keyspace id, changing same vindex twice
-"update user_metadata set email = 'a', email = 'b' where user_id = 1"
-"column has duplicate set values: 'email'"
-Gen4 plan same as above
-
-# update by primary keyspace id, changing multiple vindex columns
-"update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "address_user_map:4",
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "address_user_map:4",
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-
-# update by primary keyspace id, changing one vindex column, using order by and limit
-"update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10"
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "email_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update",
- "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10",
- "Table": "user_metadata",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_metadata"
- ]
-}
-
-# update changes non owned vindex column
-"update music_extra set music_id = 1 where user_id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update music_extra set music_id = 1 where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "music_user_map:1"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
- "Query": "update music_extra set music_id = 1 where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update music_extra set music_id = 1 where user_id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "music_user_map:1"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update",
- "Query": "update music_extra set music_id = 1 where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-
-# update by primary keyspace id, stray where clause
-"update user set val = 1 where id = id2 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = id2 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = id2 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = id2 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = id2 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# update by primary keyspace id, stray where clause with conversion error
-"update user set val = 1 where id = 18446744073709551616 and id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete from by primary keyspace id
-"delete from user where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
- "Query": "delete from `user` where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update",
- "Query": "delete from `user` where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multi-table delete with comma join
-"delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete a from unsharded_a a, unsharded_b b where a.id = b.id and b.val = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete a from unsharded_a as a, unsharded_b as b where a.id = b.id and b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# multi-table delete with ansi join
-"delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete a from unsharded_a a join unsharded_b b on a.id = b.id where b.val = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete a from unsharded_a as a join unsharded_b as b on a.id = b.id where b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-#delete with join from subquery
-"delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000"
-{
- "QueryType": "DELETE",
- "Original": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# routing rules: deleted from a routed table
-"delete from route1 where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
- "Query": "delete from `user` as route1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update",
- "Query": "delete from `user` as route1 where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete: routing rules for subquery
-"delete from unsharded_a where a=(select a from route2)"
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded_a where a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded_a"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded_a where a=(select a from route2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded_a where a = (select a from unsharded as route2)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# update by lookup
-"update music set val = 1 where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update music set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update music set val = 1 where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update music set val = 1 where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update music set val = 1 where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# update multi-table ansi join
-"update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a a join unsharded_b b on a.id = b.id set a.val = 'foo' where b.val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a as a join unsharded_b as b on a.id = b.id set a.val = 'foo' where b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# update multi-table comma join
-"update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded_a a, unsharded_b b set a.val = 'foo' where a.id = b.id and b.val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded_a as a, unsharded_b as b set a.val = 'foo' where a.id = b.id and b.val = 1",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# delete from by lookup
-"delete from music where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# delete from, no owned vindexes
-"delete from music_extra where user_id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from music_extra where user_id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from music_extra where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from music_extra where user_id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from music_extra where user_id = 1",
- "Table": "music_extra",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-
-# simple insert, no values
-"insert into unsharded values()"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values()",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values ()",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# simple insert unsharded
-"insert into unsharded values(1, 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values(1, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (1, 2)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# simple upsert unsharded
-"insert into unsharded values(1, 2) on duplicate key update x = 3"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values(1, 2) on duplicate key update x = 3",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (1, 2) on duplicate key update x = 3",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# unsharded insert, no col list with auto-inc and authoritative column list
-"insert into unsharded_authoritative values(1,1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_authoritative values(1,1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_authoritative(col1, col2) values (:__seq0, 1)",
- "TableName": "unsharded_authoritative"
- },
- "TablesUsed": [
- "main.unsharded_authoritative"
- ]
-}
-Gen4 plan same as above
-
-# sharded upsert with sharding key set to vindex column
-"insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)"
-{
- "QueryType": "INSERT",
- "Original": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(user_id)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0) on duplicate key update user_id = values(user_id)",
- "TableName": "music",
- "VindexValues": {
- "music_user_map": "INT64(2)",
- "user_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-Gen4 plan same as above
-
-# sharded bulk upsert with sharding key set to vindex column
-"insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)"
-{
- "QueryType": "INSERT",
- "Original": "insert into music(user_id, id) values (1, 2), (3,4) on duplicate key update user_id = values(user_id)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0), (:_user_id_1, :_id_1) on duplicate key update user_id = values(user_id)",
- "TableName": "music",
- "VindexValues": {
- "music_user_map": "INT64(2), INT64(4)",
- "user_index": "INT64(1), INT64(3)"
- }
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded with select
-"insert into unsharded select id from unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# insert unsharded with select with join
-"insert into unsharded select id from unsharded join unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded join unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select id from unsharded join unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select id from unsharded join unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# insert unsharded, invalid value for auto-inc
-"insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, column present
-"insert into unsharded_auto(id, val) values(1, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(id, val) values(1, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, column absent
-"insert into unsharded_auto(val) values('aa')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(val) values('aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(val, id) values ('aa', :__seq0)",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, column absent
-"insert into unsharded_auto(val) values(false)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(val) values(false)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(val, id) values (false, :__seq0)",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# insert unsharded, multi-val
-"insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# unsharded insert subquery in insert value
-"insert into unsharded values((select 1 from dual), 1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values((select 1 from dual), 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (1, 1)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# sharded insert subquery in insert value
-"insert into user(id, val) values((select 1), 1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id, val) values((select 1), 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert into a routed table
-"insert into route1(id) values (1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into route1(id) values (1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with mimatched column list
-"insert into user(id) values (1, 2)"
-"column list doesn't match values"
-Gen4 plan same as above
-
-# insert no column list for sharded authoritative table
-"insert into authoritative values(1, 2, 3)"
-{
- "QueryType": "INSERT",
- "Original": "insert into authoritative values(1, 2, 3)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, 2, 3)",
- "TableName": "authoritative",
- "VindexValues": {
- "user_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-Gen4 plan same as above
-
-# insert sharded, no values
-"insert into user values()"
-{
- "QueryType": "INSERT",
- "Original": "insert into user values()",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with one vindex
-"insert into user(id) values (1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values (1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert ignore sharded
-"insert ignore into user(id) values (1)"
-{
- "QueryType": "INSERT",
- "Original": "insert ignore into user(id) values (1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert ignore into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert on duplicate key
-"insert into user(id) values(1) on duplicate key update col = 2"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values(1) on duplicate key update col = 2",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "InsertIgnore": true,
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with one vindex and bind var
-"insert into user(id) values (:aa)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values (:aa)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with non vindex
-"insert into user(nonid) values (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid) values (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with default seq
-"insert into user(id, nonid) values (default, 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id, nonid) values (default, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with non vindex bool value
-"insert into user(nonid) values (true)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid) values (true)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "NULL",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with all vindexes supplied
-"insert into user(nonid, name, id) values (2, 'foo', 1)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(nonid, name, id) values (2, 'foo', 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL",
- "name_user_map": "VARCHAR(\"foo\")",
- "user_index": ":__seq0"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert for non-vindex autoinc
-"insert into user_extra(nonid) values (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(nonid) values (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
- "TableName": "user_extra",
- "VindexValues": {
- "user_index": "NULL"
- }
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# insert for non-compliant names
-"insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into `weird``name`(`a``b*c`, `b*c`) values(1, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `weird``name`(`a``b*c`, `b*c`) values (:_a_b_c_0, 2)",
- "TableName": "weird`name",
- "VindexValues": {
- "user_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.weird`name"
- ]
-}
-Gen4 plan same as above
-
-# unsharded insert from union
-"insert into unsharded select 1 from dual union select 1 from dual"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select 1 from dual union select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded select 1 from dual union select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded select 1 from dual union select 1 from dual for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.dual",
- "main.unsharded"
- ]
-}
-
-# insert for non-vindex autoinc, invalid value
-"insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(nonid, extra_id) values (2, 18446744073709551616)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)",
- "TableName": "user_extra",
- "VindexValues": {
- "user_index": "NULL"
- }
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# insert invalid index value
-"insert into music_extra(music_id, user_id) values(1, 18446744073709551616)"
-{
- "QueryType": "INSERT",
- "Original": "insert into music_extra(music_id, user_id) values(1, 18446744073709551616)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into music_extra(music_id, user_id) values (:_music_id_0, :_user_id_0)",
- "TableName": "music_extra",
- "VindexValues": {
- "music_user_map": "INT64(1)",
- "user_index": "DECIMAL(18446744073709551616)"
- }
- },
- "TablesUsed": [
- "user.music_extra"
- ]
-}
-Gen4 plan same as above
-
-# insert invalid index value
-"insert into music_extra(music_id, user_id) values(1, id)"
-"column access not supported here"
-Gen4 plan same as above
-
-# insert invalid table
-"insert into noexist(music_id, user_id) values(1, 18446744073709551616)"
-"table noexist not found"
-Gen4 plan same as above
-
-# insert with multiple rows
-"insert into user(id) values (1), (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) values (1), (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL, NULL",
- "name_user_map": "NULL, NULL",
- "user_index": ":__seq0, :__seq1"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with query timeout
-"insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into user(id) values (1), (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
- "QueryTimeout": 1,
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL, NULL",
- "name_user_map": "NULL, NULL",
- "user_index": ":__seq0, :__seq1"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert with multiple rows - multi-shard autocommit
-"insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)"
-{
- "QueryType": "INSERT",
- "Original": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user(id) values (1), (2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": true,
- "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)",
- "TableName": "user",
- "VindexValues": {
- "costly_map": "NULL, NULL",
- "name_user_map": "NULL, NULL",
- "user_index": ":__seq0, :__seq1"
- }
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# insert into a vindex not allowed
-"insert into user_index(id) values(1)"
-"unsupported: multi-shard or vindex write statement"
-Gen4 plan same as above
-
-# simple replace unsharded
-"replace into unsharded values(1, 2)"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded values(1, 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded values (1, 2)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded with select
-"replace into unsharded select id from unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded select id from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded select id from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# replace unsharded, invalid value for auto-inc
-"replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(id, val) values(18446744073709551616, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded, column present
-"replace into unsharded_auto(id, val) values(1, 'aa')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(id, val) values(1, 'aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded, column absent
-"replace into unsharded_auto(val) values('aa')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(val) values('aa')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(val, id) values ('aa', :__seq0)",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace unsharded, multi-val
-"replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')"
-{
- "QueryType": "INSERT",
- "Original": "replace into unsharded_auto(id, val) values(1, 'aa'), (null, 'bb')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')",
- "TableName": "unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded_auto"
- ]
-}
-Gen4 plan same as above
-
-# replace invalid table
-"replace into noexist(music_id, user_id) values(1, 18446744073709551616)"
-"table noexist not found"
-Gen4 plan same as above
-
-# insert a row in a multi column vindex table
-"insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)"
-{
- "QueryType": "INSERT",
- "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0)",
- "TableName": "multicolvin",
- "VindexValues": {
- "cola_map": "INT64(1)",
- "colb_colc_map": "INT64(2), INT64(3)",
- "kid_index": "INT64(4)"
- }
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-Gen4 plan same as above
-
-# insert for overlapped vindex columns
-"insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)"
-{
- "QueryType": "INSERT",
- "Original": "insert overlap_vindex (kid, column_a, column_b) VALUES (1,2,3)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into overlap_vindex(kid, column_a, column_b) values (:_kid_0, :_column_a_0, 3)",
- "TableName": "overlap_vindex",
- "VindexValues": {
- "cola_kid_map": "INT64(2), INT64(1)",
- "kid_index": "INT64(1)"
- }
- },
- "TablesUsed": [
- "user.overlap_vindex"
- ]
-}
-Gen4 plan same as above
-
-# insert multiple rows in a multi column vindex table
-"insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)"
-{
- "QueryType": "INSERT",
- "Original": "insert multicolvin (column_a, column_b, column_c, kid) VALUES (1,2,3,4), (5,6,7,8)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0), (:_column_a_1, :_column_b_1, :_column_c_1, :_kid_1)",
- "TableName": "multicolvin",
- "VindexValues": {
- "cola_map": "INT64(1), INT64(5)",
- "colb_colc_map": "INT64(2), INT64(6), INT64(3), INT64(7)",
- "kid_index": "INT64(4), INT64(8)"
- }
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-Gen4 plan same as above
-
-# delete row in a multi column vindex table
-"delete from multicolvin where kid=1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicolvin where kid=1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
- "Query": "delete from multicolvin where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicolvin where kid=1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update",
- "Query": "delete from multicolvin where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update columns of multi column vindex
-"update multicolvin set column_b = 1, column_c = 2 where kid = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update multiple vindexes, with multi column vindex
-"update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "cola_map:4",
- "colb_colc_map:5"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "cola_map:4",
- "colb_colc_map:5"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update with no primary vindex on where clause (scatter update)
-"update user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with target destination
-"update `user[-]`.user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update `user[-]`.user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with no primary vindex on where clause (scatter update) - multi shard autocommit
-"update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": true,
- "Query": "update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with no primary vindex on where clause (scatter update) - query timeout
-"update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1",
- "QueryTimeout": 1,
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with non-comparison expr
-"update user_extra set val = 1 where id between 1 and 2"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where id between 1 and 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where id between 1 and 2",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with primary id through IN clause
-"update user_extra set val = 1 where user_id in (1, 2)"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where user_id in (1, 2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where user_id in (1, 2)",
- "Table": "user_extra",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with non-unique key
-"update user_extra set val = 1 where name = 'foo'"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where name = 'foo'",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where `name` = 'foo'",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update by lookup with IN clause
-"update user_extra set val = 1 where id in (1, 2)"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where id in (1, 2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where id in (1, 2)",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# update with where clause with parens
-"update user_extra set val = 1 where (name = 'foo' or id = 1)"
-{
- "QueryType": "UPDATE",
- "Original": "update user_extra set val = 1 where (name = 'foo' or id = 1)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1 where `name` = 'foo' or id = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no where clause
-"delete from user_extra"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete with target destination
-"delete from `user[-]`.user_extra"
-{
- "QueryType": "DELETE",
- "Original": "delete from `user[-]`.user_extra",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete with non-comparison expr
-"delete from user_extra where user_id between 1 and 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra where user_id between 1 and 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra where user_id between 1 and 2",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no index match
-"delete from user_extra where name = 'jose'"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra where name = 'jose'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra where `name` = 'jose'",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no index match - multi shard autocommit
-"delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'"
-{
- "QueryType": "DELETE",
- "Original": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where name = 'jose'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": true,
- "Query": "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where `name` = 'jose'",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with no index match - query timeout
-"delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'"
-{
- "QueryType": "DELETE",
- "Original": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where name = 'jose'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where `name` = 'jose'",
- "QueryTimeout": 1,
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# delete from with primary id in through IN clause
-"delete from user_extra where user_id in (1, 2)"
-{
- "QueryType": "DELETE",
- "Original": "delete from user_extra where user_id in (1, 2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from user_extra where user_id in (1, 2)",
- "Table": "user_extra",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# unsharded update where inner query references outer query
-"update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Table": "unsharded, unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)",
- "Table": "unsharded, unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-
-# unsharded delete where inner query references outer query
-"delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)"
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# update vindex value to null
-"update user set name = null where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
- "Query": "update `user` set `name` = null where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update",
- "Query": "update `user` set `name` = null where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# insert using last_insert_id
-"insert into unsharded values(last_insert_id(), 2)"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded values(last_insert_id(), 2)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded values (:__lastInsertId, 2)",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# update vindex value to null with multiple primary keyspace id
-"update user set name = null where id in (1, 2, 3)"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id in (1, 2, 3)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update",
- "Query": "update `user` set `name` = null where id in (1, 2, 3)",
- "Table": "user",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update vindex value to null without a where clause
-"update user set name = null"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` for update",
- "Query": "update `user` set `name` = null",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update vindex value to null with complex where clause
-"update user set name = null where id + 1 = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = null where id + 1 = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id + 1 = 2 for update",
- "Query": "update `user` set `name` = null where id + 1 = 2",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete from user by primary keyspace id with in clause
-"delete from user where id in (1, 2, 3)"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id in (1, 2, 3)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update",
- "Query": "delete from `user` where id in (1, 2, 3)",
- "Table": "user",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete from user by complex expression
-"delete from user where id + 1 = 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where id + 1 = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id + 1 = 2 for update",
- "Query": "delete from `user` where id + 1 = 2",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete from user without a where clause
-"delete from user"
-{
- "QueryType": "DELETE",
- "Original": "delete from user",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete with single table targets
-"delete music from music where id = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete music from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete music from music where id = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select user_id, id from music where id = 1 for update",
- "Query": "delete from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# scatter update table with owned vindexes without changing lookup vindex
-"update user set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set val = 1",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# scatter delete with owned lookup vindex
-"delete from user"
-{
- "QueryType": "DELETE",
- "Original": "delete from user",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update multi column vindex, without values for all the vindex columns
-"update multicolvin set column_c = 2 where kid = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicolvin set column_c = 2 where kid = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colb_colc_map:4"
- ],
- "KsidLength": 1,
- "KsidVindex": "kid_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update",
- "Query": "update multicolvin set column_c = 2 where kid = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "kid_index"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# update with binary value
-"update user set name = _binary 'abc' where id = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = _binary 'abc' where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
- "Query": "update `user` set `name` = _binary 'abc' where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set name = _binary 'abc' where id = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update",
- "Query": "update `user` set `name` = _binary 'abc' where id = 1",
- "Table": "user",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete with binary value
-"delete from user where name = _binary 'abc'"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where name = _binary 'abc'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
- "Query": "delete from `user` where `name` = _binary 'abc'",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from user where name = _binary 'abc'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update",
- "Query": "delete from `user` where `name` = _binary 'abc'",
- "Table": "user",
- "Values": [
- "VARBINARY(\"abc\")"
- ],
- "Vindex": "name_user_map"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete with shard targeting
-"delete from `user[-]`.user"
-{
- "QueryType": "DELETE",
- "Original": "delete from `user[-]`.user",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update with shard targeting
-"update `user[-]`.user set name = 'myname'"
-{
- "QueryType": "UPDATE",
- "Original": "update `user[-]`.user set name = 'myname'",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from `user` for update",
- "Query": "update `user` set `name` = 'myname'",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update with shard targeting without vindex
-"update `user[-]`.user_extra set val = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update `user[-]`.user_extra set val = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update user_extra set val = 1",
- "Table": "user_extra"
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# multi-table delete with single table
-"delete u.* from user u where u.id * u.col = u.foo"
-{
- "QueryType": "DELETE",
- "Original": "delete u.* from user u where u.id * u.col = u.foo",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id * `user`.col = `user`.foo for update",
- "Query": "delete from `user` where `user`.id * `user`.col = `user`.foo",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# delete with unknown reference
-"delete music from user where id = 1"
-"Unknown table 'music' in MULTI DELETE"
-Gen4 plan same as above
-
-# delete with derived tables
-"delete music from (select * from user) music where id = 1"
-"The target table music of the DELETE is not updatable"
-Gen4 plan same as above
-
-# delete with derived tables with unknown table
-"delete user from (select * from user) music where id = 1"
-"Unknown table 'user' in MULTI DELETE"
-Gen4 plan same as above
-
-"INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update",
- "TableName": "user_privacy_consents"
- },
- "TablesUsed": [
- "main.user_privacy_consents"
- ]
-}
-Gen4 error: unsupported: unable to split predicates to derived table: not :__sq_has_values1
-
-# Delete on backfilling unique lookup vindex should be a scatter
-"delete from zlookup_unique.t1 where c2 = 20"
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 20 for update",
- "Query": "delete from t1 where c2 = 20",
- "Table": "t1"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-# Update on backfilling unique lookup vindex should be a scatter
-"update zlookup_unique.t1 set c2 = 1 where c2 = 20"
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 20 for update",
- "Query": "update t1 set c2 = 1 where c2 = 20",
- "Table": "t1"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-# Delete on backfilling and non-backfilling unique lookup vindexes should be a delete equal
-"delete from zlookup_unique.t1 where c2 = 10 and c3 = 20"
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "delete from t1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "delete from t1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-
-# Update on backfilling and non-backfilling unique lookup vindexes should be an equal
-"update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20"
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update",
- "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20",
- "Table": "t1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-
-# Delete EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be a delete IN
-"delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)"
-{
- "QueryType": "DELETE",
- "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 in (20, 21)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 in (20, 21) for update",
- "Query": "delete from t1 where c2 = 10 and c3 in (20, 21)",
- "Table": "t1",
- "Values": [
- "(INT64(20), INT64(21))"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-# Update EQUAL and IN on backfilling and non-backfilling unique lookup vindexes should be an update IN
-"update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)"
-{
- "QueryType": "UPDATE",
- "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "lookup_t1:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "xxhash",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 in (20, 21) for update",
- "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)",
- "Table": "t1",
- "Values": [
- "(INT64(20), INT64(21))"
- ],
- "Vindex": "lookup_t1_2"
- },
- "TablesUsed": [
- "zlookup_unique.t1"
- ]
-}
-Gen4 plan same as above
-
-#update with alias table
-"update user u set u.name = 'john' where u.col \u003e 20"
-{
- "QueryType": "UPDATE",
- "Original": "update user u set u.name = 'john' where u.col \u003e 20",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_user_map:3"
- ],
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly, u.`name` = 'john' from `user` as u where u.col \u003e 20 for update",
- "Query": "update `user` as u set u.`name` = 'john' where u.col \u003e 20",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-#delete with alias table
-"delete from user u where u.col \u003e 20"
-{
- "QueryType": "DELETE",
- "Original": "delete from user u where u.col \u003e 20",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.col \u003e 20 for update",
- "Query": "delete from `user` as u where u.col \u003e 20",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# update with a multicol vindex
-"update multicol_tbl set x = 1 where cola = 1 and colb = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with a multicol vindex - reverse order
-"update multicol_tbl set x = 1 where colb = 2 and cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with a multicol vindex using an IN clause
-"update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with a multicol vindex using an IN clause
-"update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where colb IN (1,2) and cola IN (3,4)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola in (3, 4)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(3), INT64(4))",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with a multicol vindex
-"delete from multicol_tbl where cola = 1 and colb = 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "delete from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with a multicol vindex - reverse order
-"delete from multicol_tbl where colb = 2 and cola = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
- "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update",
- "Query": "delete from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with a multicol vindex using an IN clause
-"delete from multicol_tbl where colb IN (1,2) and cola = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb IN (1,2) and cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola = 1 for update",
- "Query": "delete from multicol_tbl where colb in (1, 2) and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with a multicol vindex using an IN clause
-"delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where colb IN (1,2) and cola IN (3,4)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola in (3, 4) for update",
- "Query": "delete from multicol_tbl where colb in (1, 2) and cola in (3, 4)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(3), INT64(4))",
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with multicol and an owned vindex which changes
-"update multicol_tbl set colc = 1 where cola = 1 and colb = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colc_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "colc_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update",
- "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with routing using non-unique lookup vindex
-"update multicol_tbl set x = 42 where name = 'foo'"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 42 where name = 'foo'",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 42 where `name` = 'foo'",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with routing using subsharding column
-"update multicol_tbl set x = 42 where cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 42 where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 42 where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 42 where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 42 where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with routing using subsharding column on lookup vindex
-"update multicol_tbl set name = 'bar' where cola = 1"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set name = 'bar' where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_muticoltbl_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
- "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set name = 'bar' where cola = 1",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_muticoltbl_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update",
- "Query": "update multicol_tbl set `name` = 'bar' where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# update with routing using subsharding column with in query
-"update multicol_tbl set name = 'bar' where cola in (1,2)"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set name = 'bar' where cola in (1,2)",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "ChangedVindexValues": [
- "name_muticoltbl_map:4"
- ],
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola in (1, 2) for update",
- "Query": "update multicol_tbl set `name` = 'bar' where cola in (1, 2)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# update with routing using subsharding column with in query as lower cost over lookup vindex
-"update multicol_tbl set x = 1 where name = 'foo' and cola = 2"
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with routing using non-unique lookup vindex
-"delete from multicol_tbl where name = 'foo'"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where name = 'foo'",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' for update",
- "Query": "delete from multicol_tbl where `name` = 'foo'",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with routing using subsharding column
-"delete from multicol_tbl where cola = 1"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
- "Query": "delete from multicol_tbl where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update",
- "Query": "delete from multicol_tbl where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# delete with routing using subsharding column with in query
-"delete from multicol_tbl where cola in (1,2)"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where cola in (1,2)",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola in (1, 2) for update",
- "Query": "delete from multicol_tbl where cola in (1, 2)",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-Gen4 plan same as above
-
-# delete with routing using subsharding column with in query as lower cost over lookup vindex
-"delete from multicol_tbl where name = 'foo' and cola = 2"
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
- "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from multicol_tbl where name = 'foo' and cola = 2",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 2,
- "KsidVindex": "multicolIdx",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update",
- "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2",
- "Table": "multicol_tbl",
- "Values": [
- "VARCHAR(\"foo\")"
- ],
- "Vindex": "name_muticoltbl_map"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# insert using select with simple table.
-"insert into music(id, user_id) select * from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into music(id, user_id) select * from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "music",
- "VindexOffsetFromSelect": {
- "music_user_map": "[0]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into music(id, user_id) select * from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "music",
- "VindexOffsetFromSelect": {
- "music_user_map": "[0]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# insert using select with more columns in insert
-"insert into music(id, user_id) select 1"
-"Column count doesn't match value count at row 1"
-Gen4 plan same as above
-
-# insert using select with more columns in select
-"insert into music(id, user_id) select id, count(user_id), sum(user_id) from user group by id"
-"Column count doesn't match value count at row 1"
-Gen4 plan same as above
-
-# insert using select with more columns in select after accounting for star column
-"insert into music(id, user_id) select id, *, 2 from user"
-"Column count doesn't match value count at row 1"
-Gen4 plan same as above
-
-# insert using select with auto-inc column using vitess sequence, sequence column not present
-"insert into user_extra(user_id) select id from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id) select id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id) select id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# insert using select with auto-inc column using vitess sequence, sequence column present
-"insert into user_extra(id, user_id) select null, id from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(id, user_id) select null, id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select null, id from `user` where 1 != 1",
- "Query": "select null, id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(id, user_id) select null, id from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select null, id from `user` where 1 != 1",
- "Query": "select null, id from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded insert from select
-"insert into user(id) select 1 from dual"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:0",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user(id) select 1 from dual",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:0",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# insert using select with sharding column is autoinc and not present in the insert column query
-"insert into user(pattern) SELECT 1"
-{
- "QueryType": "INSERT",
- "Original": "insert into user(pattern) SELECT 1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user(pattern) SELECT 1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:1",
- "MultiShardAutocommit": false,
- "TableName": "user",
- "VindexOffsetFromSelect": {
- "costly_map": "[-1]",
- "name_user_map": "[-1]",
- "user_index": "[1]"
- },
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# insert using select with sharding column is not an autoinc and not present in the insert column query
-"insert into user_extra(pattern) SELECT 1"
-"insert query does not have sharding column 'user_id' in the column list"
-Gen4 plan same as above
-
-# sharded same keyspace
-"insert into user_extra(user_id, col) select col1, col2 from user"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1",
- "Query": "select col1, col2 from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from user",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1",
- "Query": "select col1, col2 from `user` for update",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# unsharded same keyspace
-"insert into unsharded(col) select col from unsharded_auto"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded(col) select col from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_auto",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into unsharded(col) select col from unsharded_auto for update",
- "TableName": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# sharded different keyspace
-"insert into user_extra(user_id, col) select col1, col2 from t1"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from t1 where 1 != 1",
- "Query": "select col1, col2 from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from t1 where 1 != 1",
- "Query": "select col1, col2 from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra",
- "zlookup_unique.t1"
- ]
-}
-
-# sharded insert table, unsharded select table
-"insert into user_extra(user_id, col) select col1, col2 from unsharded_tab"
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
- "Query": "select col1, col2 from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "user.user_extra"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Select",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "AutoIncrement": "main:2",
- "MultiShardAutocommit": false,
- "TableName": "user_extra",
- "VindexOffsetFromSelect": {
- "user_index": "[0]"
- },
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1",
- "Query": "select col1, col2 from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main_2.unsharded_tab",
- "user.user_extra"
- ]
-}
-
-# unsharded different keyspace
-"insert into unsharded(col) select col from unsharded_tab"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded_tab where 1 != 1",
- "Query": "select col from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from unsharded_tab",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded_tab where 1 != 1",
- "Query": "select col from unsharded_tab for update",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main_2.unsharded_tab"
- ]
-}
-
-# unsharded insert table, sharded select table
-"insert into unsharded(col) select col from t1"
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col from t1 where 1 != 1",
- "Query": "select col from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-{
- "QueryType": "INSERT",
- "Original": "insert into unsharded(col) select col from t1",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "TableName": "unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "zlookup_unique",
- "Sharded": true
- },
- "FieldQuery": "select col from t1 where 1 != 1",
- "Query": "select col from t1 for update",
- "Table": "t1"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "zlookup_unique.t1"
- ]
-}
-
-# unsharded subquery in sharded update, not the same keyspace between outer and inner
-"update user set col = (select id from unsharded)"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select id from unsharded)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded lock in share mode",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = :__sq1",
- "Table": "user"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sharded subquery in unsharded update, not the same keyspace
-"update unsharded set col = (select id from user)"
-"unsupported: sharded subqueries in DML"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` lock in share mode",
- "Table": "`user`"
- },
- {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = :__sq1",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sharded join unsharded subqueries in unsharded update
-"update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)"
-"unsupported: sharded subqueries in DML"
-{
- "QueryType": "UPDATE",
- "Original": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "unsharded_id": 0
- },
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded lock in share mode",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = :unsharded_id lock in share mode",
- "Table": "`user`",
- "Values": [
- ":unsharded_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Update",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update unsharded set col = :__sq1",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sharded update with sub query where the sources can be merged into a single query
-"update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5",
- "Table": "user",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# merge through correlated subquery
-"update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id = 5",
- "Table": "user",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# merge through correlated subquery #2
-"update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id \u003e 5"
-"unsupported: subqueries in sharded DML"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id \u003e 5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id \u003e 5",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# all defaults empty column, empty values
-"insert into authoritative () values ()"
-{
- "QueryType": "INSERT",
- "Original": "insert into authoritative () values ()",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into authoritative(user_id) values (:_user_id_0)",
- "TableName": "authoritative",
- "VindexValues": {
- "user_index": "NULL"
- }
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-Gen4 plan same as above
-
-# explain dml without any directive should fail
-"explain format=vtexplain delete from user"
-"explain format = vtexplain will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `explain /*vt+ EXECUTE_DML_QUERIES */ format = vtexplain delete from t1`"
-Gen4 plan same as above
-
-# explain dml with actually_run_query directive
-"explain /*vt+ execute_dml_queries */ format=vtexplain delete from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain /*vt+ execute_dml_queries */ format=vtexplain delete from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# explain dml with actually_run_query directive - 2
-"explain /*vt+ eXECUTE_DML_QUERIES */ format=vtexplain delete from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain /*vt+ eXECUTE_DML_QUERIES */ format=vtexplain delete from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update",
- "Query": "delete from `user`",
- "Table": "user"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Here V3 populates the TablesUsed incorrectly
-# delete with join from multi table join subquery.
-"delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000"
-{
- "QueryType": "DELETE",
- "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded, unsharded, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded, unsharded_b"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col \u003c 1000",
- "Table": "unsharded, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_b"
- ]
-}
-
-# update with routing using multi column vindex
-"update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# delete with routing using multi column vindex
-"delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "DELETE",
- "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
- "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "KsidLength": 1,
- "KsidVindex": "user_index",
- "MultiShardAutocommit": false,
- "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update",
- "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "user",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
new file mode 100644
index 00000000000..242d447cc40
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json
@@ -0,0 +1,6477 @@
+[
+ {
+ "comment": "No where clause",
+ "query": "select id from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Query that always returns empty",
+ "query": "select id from user where someColumn = null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where someColumn = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn = null",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where someColumn = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn = null",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Null Safe Equality Operator is handled correctly",
+ "query": "SELECT id from user where someColumn <=> null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id from user where someColumn <=> null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn <=> null",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id from user where someColumn <=> null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where someColumn <=> null",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unique vindex route",
+ "query": "select id from user where user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unique vindex route, but complex expr",
+ "query": "select id from user where user.id = 5+5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5+5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5 + 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 5+5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 5 + 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(10)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table multiple unique vindex match",
+ "query": "select id from music where id = 5 and user_id = 4",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = 5 and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = 5 and user_id = 4",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = 5 and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = 5 and user_id = 4",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table multiple non-unique vindex match",
+ "query": "select id from user where costly = 'aa' and name = 'bb'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly = 'aa' and name = 'bb'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"bb\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly = 'aa' and name = 'bb'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"bb\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table multiple non-unique vindex match for IN clause",
+ "query": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause",
+ "query": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, swapped columns",
+ "query": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, choose cost within tuple",
+ "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, choose cost within tuple, swapped",
+ "query": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause, choose cost",
+ "query": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN clause vs equality",
+ "query": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: multiple vindex matches",
+ "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: tuple inside tuple",
+ "query": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: tuple inside tuple, but no match in tuple",
+ "query": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Table": "`user`",
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: tuple inside tuple, mismatched values",
+ "query": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: RHS not tuple",
+ "query": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Composite IN: RHS has no simple values",
+ "query": "select id from user where (col1, name) in (('aa', 1+1))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (('aa', 1+1))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where (col1, name) in (('aa', 1+1))",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "(INT64(2))"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "IN clause: LHS is neither column nor composite tuple",
+ "query": "select Id from user where 1 in ('aa', 'bb')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select Id from user where 1 in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select Id from `user` where 1 != 1",
+ "Query": "select Id from `user` where 1 in ('aa', 'bb')",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select Id from user where 1 in ('aa', 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select Id from `user` where 1 != 1",
+ "Query": "select Id from `user` where 1 in ('aa', 'bb')",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table complex in clause",
+ "query": "select id from user where name in (col, 'bb')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in (col, 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in (col, 'bb')",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in (col, 'bb')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in (col, 'bb')",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table equality route with val arg",
+ "query": "select id from user where name = :a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = :a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = :a",
+ "Table": "`user`",
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = :a",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = :a",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table equality route with unsigned value",
+ "query": "select id from user where name = 18446744073709551615",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = 18446744073709551615",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = 18446744073709551615",
+ "Table": "`user`",
+ "Values": [
+ "UINT64(18446744073709551615)"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name = 18446744073709551615",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "UINT64(18446744073709551615)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` = 18446744073709551615",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table in clause list arg",
+ "query": "select id from user where name in ::list",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in ::list",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":list"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where name in ::list",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ ":list"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `name` in ::__vals",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint",
+ "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint on right table",
+ "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint on left table of left join",
+ "query": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unique vindex constraint on left-joined right table",
+ "query": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route unique vindex constraint",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route unique vindex route on both routes",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = 5",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and user_extra.user_id = 5 and `user`.col = user_extra.col",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route with cross-route constraint",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
+ "Table": "user_extra",
+ "Values": [
+ ":user_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
+ "Table": "user_extra",
+ "Values": [
+ ":user_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-route with non-route constraint, should use first route.",
+ "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where 1 = 1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where 1 = 1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where 1 = 1 and user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectIN is the best constraint.",
+ "query": "select id from user where user.col = 5 and user.id in (1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints and boolean, SelectIN is the best constraint.",
+ "query": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints and boolean, SelectEqual is the best constraint.",
+ "query": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id or col as val from `user` where 1 != 1",
+ "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id or col as val from `user` where 1 != 1",
+ "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectEqual is the best constraint.",
+ "query": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint.",
+ "query": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed.",
+ "query": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Route with OR and AND clause, must parenthesize correctly.",
+ "query": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Unsharded route",
+ "query": "select unsharded.id from user join unsharded where unsharded.id = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "unsharded_id": 0
+ },
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :unsharded_id",
+ "Table": "`user`",
+ "Values": [
+ ":unsharded_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: choose the redirected table",
+ "query": "select col from route1 where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col)",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col) and u.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery merge-able into a route of a join tree",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id)",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "ensure subquery reordering gets us a better plan",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id = 5 and u.id in (select m2 from `user` where `user`.id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = 5) and u.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "nested subquery",
+ "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id))",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.m from `user` as u where 1 != 1",
+ "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(:user_extra_col, INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Correlated subquery in where clause",
+ "query": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "outer and inner subquery route by same int val",
+ "query": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "outer and inner subquery route by same str val",
+ "query": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "outer and inner subquery route by same val arg",
+ "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Table": "`user`",
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
+ "Table": "`user`",
+ "Values": [
+ ":a"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unresolved symbol in inner subquery.",
+ "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a and foo.id = 1)",
+ "plan": "symbol foo.id not found"
+ },
+ {
+ "comment": "outer and inner subquery route by same outermost column value",
+ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` as uu where 1 != 1",
+ "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` as uu where 1 != 1",
+ "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery in IN clause. Note the improved underlying plan as SelectIN.",
+ "query": "select id from user where id in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery in NOT IN clause.",
+ "query": "select id from user where id not in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id not in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id not in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery in EXISTS clause.",
+ "query": "select id from user where exists (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where exists (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where exists (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard subquery as expression",
+ "query": "select id from user where id = (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi-level pullout",
+ "query": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id3 from `user` where 1 != 1",
+ "Query": "select id3 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` where 1 != 1",
+ "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id1 from `user` where 1 != 1",
+ "Query": "select id1 from `user` where id = :__sq2",
+ "Table": "`user`",
+ "Values": [
+ ":__sq2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id3 from `user` where 1 != 1",
+ "Query": "select id3 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from `user` where 1 != 1",
+ "Query": "select id2 from `user` where :__sq_has_values2 = 1 and id2 in ::__sq2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id1 from `user` where 1 != 1",
+ "Query": "select id1 from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules subquery merge",
+ "query": "select col from user where id = (select id from route1 where route1.id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules subquery pullout",
+ "query": "select col from user where id = (select id from route2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route2)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded as route2 where 1 != 1",
+ "Query": "select id from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route2)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded as route2 where 1 != 1",
+ "Query": "select id from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = :__sq1",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Case preservation test",
+ "query": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1",
+ "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from `user`, user_extra where 1 != 1",
+ "Query": "select user_extra.Id from `user`, user_extra where `user`.Id = 5 and `user`.iD = user_extra.User_Id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "database() call in where clause.",
+ "query": "select id from user where database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where database()",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where database()",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Select with equals null",
+ "query": "select id from music where id = null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id = null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "SELECT with IS NULL",
+ "query": "select id from music where id is null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "SELECT with IS NOT NULL",
+ "query": "select id from music where id is not null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is not null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is not null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is not null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is not null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and null match",
+ "query": "select id from music where user_id = 4 and id = null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id = null",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id = null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id = null",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and IN (null)",
+ "query": "select id from music where user_id = 4 and id IN (null)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null)",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null)",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and IN (null, 1, 2)",
+ "query": "select id from music where user_id = 4 and id IN (null, 1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
+ "Table": "music",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and NOT IN (null, 1, 2)",
+ "query": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Single table with unique vindex match and NOT IN (null, 1, 2) predicates inverted",
+ "query": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "pullout sq after pullout sq",
+ "query": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(42)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(411)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and (:__sq_has_values2 = 0 or id not in ::__sq2)",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(411)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(42)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where (:__sq_has_values1 = 0 or id not in ::__sq1) and (:__sq_has_values2 = 1 and id in ::__vals)",
+ "Table": "`user`",
+ "Values": [
+ ":__sq2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "solving LIKE query with a CFC prefix vindex",
+ "query": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
+ "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Table": "cfc_vindex_col",
+ "Values": [
+ "VARCHAR(\"A%\")"
+ ],
+ "Vindex": "cfc"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
+ "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
+ "Table": "cfc_vindex_col",
+ "Values": [
+ "VARCHAR(\"A%\")"
+ ],
+ "Vindex": "cfc"
+ },
+ "TablesUsed": [
+ "user.cfc_vindex_col"
+ ]
+ }
+ },
+ {
+ "comment": "select * from samecolvin where col = :col",
+ "query": "select * from samecolvin where col = :col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from samecolvin where col = :col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from samecolvin where 1 != 1",
+ "Query": "select col from samecolvin where col = :col",
+ "Table": "samecolvin",
+ "Values": [
+ ":col"
+ ],
+ "Vindex": "vindex1"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from samecolvin where col = :col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from samecolvin where 1 != 1",
+ "Query": "select col from samecolvin where col = :col",
+ "Table": "samecolvin",
+ "Values": [
+ ":col"
+ ],
+ "Vindex": "vindex1"
+ },
+ "TablesUsed": [
+ "user.samecolvin"
+ ]
+ }
+ },
+ {
+ "comment": "non unique predicate on vindex",
+ "query": "select id from user where user.id > 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id > 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id > 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id > 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id > 5",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select from unsharded keyspace with uncorrelated subquery which should be merged to a single route",
+ "query": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "in the subquery, the id will be scoped to the local table as there is no qualifier associated with it.",
+ "query": "select id from user where id in (select col from unsharded where col = id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from unsharded where col = id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded where col = id",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select col from unsharded where col = id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded where col = id",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery with different keyspace tables involved",
+ "query": "select id from user where id in (select col from unsharded where col = user.id)",
+ "plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "correlated subquery with same keyspace",
+ "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "SelectDBA with uncorrelated subqueries",
+ "query": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema from information_schema.`tables` as t where 1 != 1",
+ "Query": "select t.table_schema from information_schema.`tables` as t where t.table_schema in (select c.column_name from information_schema.`columns` as c)",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema from information_schema.`tables` as t where 1 != 1",
+ "Query": "select t.table_schema from information_schema.`tables` as t where t.table_schema in (select c.column_name from information_schema.`columns` as c)",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.columns",
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "SelectReference with uncorrelated subqueries",
+ "query": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref where 1 != 1",
+ "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref where 1 != 1",
+ "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "SelectEqualUnique with uncorrelated subqueries",
+ "query": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SelectEqualUnique with EXISTS uncorrelated subquery",
+ "query": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SelectEqualUnique with NOT EXISTS uncorrelated subquery",
+ "query": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SelectScatter with NOT EXISTS uncorrelated subquery",
+ "query": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "The outer and first inner are SelectEqualUnique with same Vindex value, the second inner has different Vindex value",
+ "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and :__sq_has_values1 = 1 and id in ::__sq1 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values2 = 1 and id in ::__sq2)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "The outer and second inner are SelectEqualUnique with same Vindex value, the first inner has different Vindex value",
+ "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values1 = 0 or id not in ::__sq1)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutNotIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5 and (:__sq_has_values1 = 0 or id not in ::__sq1) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "two correlated subqueries that can be merged into a single route",
+ "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "transitive closures for the win",
+ "query": "select id from user where user.id = user.col and user.col = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = user.col and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where user.id = user.col and user.col = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join with transitive closures",
+ "query": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_id and user_extra.col = user_extra.user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user`, user_extra where 1 != 1",
+ "Query": "select id from `user`, user_extra where user_extra.col = user_extra.user_id and `user`.id = user_extra.col",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "not supported transitive closures with equality inside of an OR",
+ "query": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_id and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`)",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = :user_extra_col",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules subquery merge with alias",
+ "query": "select col from user where id = (select id from route1 as a where a.id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "left join where clauses where we can optimize into an inner join",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.foobar = 5 and user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "this query led to a nil pointer error",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where foo(user_extra.foobar)",
+ "plan": "expr cannot be translated, not supported: foo(user_extra.foobar)"
+ },
+ {
+ "comment": "filter after outer join",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "user_extra.id is null",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "subquery on other table",
+ "query": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where col2 = 'a'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), 1",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where col2 = 'a'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, 1 ASC",
+ "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1 order by `user`.id asc, `user`.col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "should use colb_colc_map as first column of the vindex is present in predicate",
+ "query": "select * from multicolvin where column_b = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "should only use first column of the vindex colb_colc_map",
+ "query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "uses vindex colb_colc_map",
+ "query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "v3 takes cola_map, gen4 takes colb_colc_map, may be based on map key ordering",
+ "query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "cola_map"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicolvin where 1 != 1",
+ "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
+ "Table": "multicolvin",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "colb_colc_map"
+ },
+ "TablesUsed": [
+ "user.multicolvin"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex produces Equal plan in gen4 and Scatter in v3",
+ "query": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with different order places the vindex keys in correct order",
+ "query": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "INT64(2)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex produces IN plan in gen4 and Scatter in v3",
+ "query": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in (1, 2) and colb in (3, 4)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in ::__vals0 and colb in ::__vals1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))",
+ "(INT64(3), INT64(4))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with different order places the vindex keys in correct order in IN plan in gen4",
+ "query": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (3, 4) and cola in (1, 2)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in ::__vals1 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(2))",
+ "(INT64(3), INT64(4))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with different order with one IN predicate and one equality",
+ "query": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 1 and cola in (3, 4)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 1 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(3), INT64(4))",
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with both IN predicate and equality predicate",
+ "query": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(4)",
+ "INT64(7)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with one column with equal followed by IN predicate, ordering matters for now",
+ "query": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 4 and colb in (1, 10) and cola in (5, 6)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb = 4 and colb in ::__vals1 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(5), INT64(6))",
+ "(INT64(1), INT64(10))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with one column with IN followed by equal predicate, ordering matters for now",
+ "query": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in (5, 6)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in ::__vals0",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(5), INT64(6))",
+ "INT64(4)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex with better plan selection",
+ "query": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(5)",
+ "INT64(6)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex as tuple",
+ "query": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "MultiEqual",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
+ "Table": "multicol_tbl",
+ "Values": [
+ "(INT64(1), INT64(3))",
+ "(INT64(2), INT64(4))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex, partial vindex with SelectEqual",
+ "query": "select * from multicol_tbl where cola = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN",
+ "query": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb in (2, 3)",
+ "Table": "multicol_tbl"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from multicol_tbl where 1 != 1",
+ "Query": "select * from multicol_tbl where cola = 1 and colb in ::__vals1",
+ "Table": "multicol_tbl",
+ "Values": [
+ "INT64(1)",
+ "(INT64(2), INT64(3))"
+ ],
+ "Vindex": "multicolIdx"
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ },
+ {
+ "comment": "left join with where clause - should be handled by gen4 but still isn't",
+ "query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where 1 != 1",
+ "Query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "filter on outer join should not be used for routing",
+ "query": "select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where 1 != 1",
+ "Query": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where `user`.id is null",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "conditions following a null safe comparison operator can be used for routing",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id <=> NULL AND music.user_id = 10",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id <=> NULL AND music.user_id = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 10 and `user`.id <=> null",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(10)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, where conditions using both sides of the join are not pulled into the join conditions",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, where conditions using both sides of the join are not pulled into the join conditions (swapped order)",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, null intolerant where conditions using both sides of the join are transformed to inner joins",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id = 5 and music.user_id = `user`.id and music.componist = `user`.`name`",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "For left joins, null intolerant where conditions using `IS NOT NULL` allow outer join simplification",
+ "query": "SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id = 5 and `user`.id is not null and `user`.id = music.user_id",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "optimize ORs to IN route op codes #1",
+ "query": "select col from user where id = 1 or id = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "optimize ORs to IN route op codes #2",
+ "query": "select col from user where id = 1 or id = 2 or id = 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2 or id = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 or id = 2 or id = 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "optimize ORs to IN route op codes #3",
+ "query": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
+ "Table": "`user`",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3), INT64(4))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Don't pick a vindex for an IS NULL predicate if it's a lookup vindex",
+ "query": "select id from music where id is null and user_id in (1,2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null and user_id in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null and user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from music where id is null and user_id in (1,2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music where id is null and user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Self referencing columns in HAVING should work",
+ "query": "select a+2 as a from user having a = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a+2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a + 2 as a from `user` where 1 != 1",
+ "Query": "select a + 2 as a from `user` having a = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a+2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a + 2 as a from `user` where 1 != 1",
+ "Query": "select a + 2 as a from `user` where a + 2 = 42",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING predicates that use table columns are safe to rewrite if we can move them to the WHERE clause",
+ "query": "select user.col + 2 as a from user having a = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col + 2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col + 2 as a from `user` where 1 != 1",
+ "Query": "select `user`.col + 2 as a from `user` having a = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col + 2 as a from user having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col + 2 as a from `user` where 1 != 1",
+ "Query": "select `user`.col + 2 as a from `user` where `user`.col + 2 = 42",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING predicates that use table columns should not get rewritten on unsharded keyspaces",
+ "query": "select col + 2 as a from unsharded having a = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col + 2 as a from unsharded having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col + 2 as a from unsharded where 1 != 1",
+ "Query": "select col + 2 as a from unsharded having a = 42",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col + 2 as a from unsharded having a = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col + 2 as a from unsharded where 1 != 1",
+ "Query": "select col + 2 as a from unsharded having a = 42",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt b/go/vt/vtgate/planbuilder/testdata/filter_cases.txt
deleted file mode 100644
index 75ef9178495..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/filter_cases.txt
+++ /dev/null
@@ -1,6238 +0,0 @@
-# No where clause
-"select id from user"
-{
- "QueryType": "SELECT",
- "Original": "select id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Query that always return empty
-"select id from user where someColumn = null"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where someColumn = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn = null",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where someColumn = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn = null",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Null Safe Equality Operator is handled correctly
-"SELECT id from user where someColumn \u003c=\u003e null"
-{
- "QueryType": "SELECT",
- "Original": "SELECT id from user where someColumn \u003c=\u003e null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn \u003c=\u003e null",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT id from user where someColumn \u003c=\u003e null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where someColumn \u003c=\u003e null",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table unique vindex route
-"select id from user where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table unique vindex route, but complex expr
-"select id from user where user.id = 5+5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5+5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5 + 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 5+5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 5 + 5",
- "Table": "`user`",
- "Values": [
- "INT64(10)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table multiple unique vindex match
-"select id from music where id = 5 and user_id = 4"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = 5 and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = 5 and user_id = 4",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = 5 and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = 5 and user_id = 4",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table multiple non-unique vindex match
-"select id from user where costly = 'aa' and name = 'bb'"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly = 'aa' and name = 'bb'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"bb\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly = 'aa' and name = 'bb'",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"bb\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table multiple non-unique vindex match for IN clause
-"select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause
-"select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, swapped columns
-"select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, choose cost within tuple
-"select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, choose cost within tuple, swapped
-"select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause, choose cost
-"select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN clause vs equality
-"select id from user where (col, name) in (('aa', 'bb')) and id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: multiple vindex matches
-"select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: tuple inside tuple
-"select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: tuple inside tuple, but no match in tuple
-"select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Table": "`user`",
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: tuple inside tuple, mismatched values
-"select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Composite IN: RHS not tuple
-"select id from user where (col1, name) in (select * from music where music.user_id=user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Composite IN: RHS has no simple values
-"select id from user where (col1, name) in (('aa', 1+1))"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (('aa', 1+1))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where (col1, name) in (('aa', 1+1))",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "(INT64(2))"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# IN clause: LHS is neither column nor composite tuple
-"select Id from user where 1 in ('aa', 'bb')"
-{
- "QueryType": "SELECT",
- "Original": "select Id from user where 1 in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select Id from `user` where 1 != 1",
- "Query": "select Id from `user` where 1 in ('aa', 'bb')",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select Id from user where 1 in ('aa', 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select Id from `user` where 1 != 1",
- "Query": "select Id from `user` where 1 in ('aa', 'bb')",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table complex in clause
-"select id from user where name in (col, 'bb')"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in (col, 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in (col, 'bb')",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in (col, 'bb')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in (col, 'bb')",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table equality route with val arg
-"select id from user where name = :a"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = :a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = :a",
- "Table": "`user`",
- "Values": [
- ":a"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = :a",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- ":a"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = :a",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table equality route with unsigned value
-"select id from user where name = 18446744073709551615"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = 18446744073709551615",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = 18446744073709551615",
- "Table": "`user`",
- "Values": [
- "UINT64(18446744073709551615)"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name = 18446744073709551615",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "UINT64(18446744073709551615)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` = 18446744073709551615",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table in clause list arg
-"select id from user where name in ::list"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in ::list",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in ::__vals",
- "Table": "`user`",
- "Values": [
- ":list"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where name in ::list",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- ":list"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `name` in ::__vals",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Multi-table unique vindex constraint
-"select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-table unique vindex constraint on right table
-"select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-table unique vindex constraint on left table of left join
-"select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select user_extra.id from `user` left join user_extra on `user`.id = user_extra.user_id where `user`.id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Multi-table unique vindex constraint on left-joined right table
-"select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user left join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where user_extra.user_id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Multi-route unique vindex constraint
-"select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-route unique vindex route on both routes
-"select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = 5",
- "Table": "user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.id from `user`, user_extra where `user`.id = 5 and user_extra.user_id = 5 and `user`.col = user_extra.col",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-route with cross-route constraint
-"select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
- "Table": "user_extra",
- "Values": [
- ":user_col"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col",
- "Table": "user_extra",
- "Values": [
- ":user_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Multi-route with non-route constraint, should use first route.
-"select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where 1 = 1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where 1 = 1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where 1 = 1 and user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Route with multiple route constraints, SelectIN is the best constraint.
-"select id from user where user.col = 5 and user.id in (1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints and boolean, SelectIN is the best constraint.
-"select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints and boolean, SelectEqual is the best constraint.
-"select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'"
-{
- "QueryType": "SELECT",
- "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id or col as val from `user` where 1 != 1",
- "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id or col as val from `user` where 1 != 1",
- "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints, SelectEqual is the best constraint.
-"select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints, SelectEqualUnique is the best constraint.
-"select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed.
-"select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Route with OR and AND clause, must parenthesize correctly.
-"select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Unsharded route
-"select unsharded.id from user join unsharded where unsharded.id = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "unsharded_id": 0
- },
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :unsharded_id",
- "Table": "`user`",
- "Values": [
- ":unsharded_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# routing rules: choose the redirected table
-"select col from route1 where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# subquery
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col)",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col) and u.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery merge-able into a route of a join tree
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id)",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# ensure subquery reordering gets us a better plan
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id = 5 and u.id in (select m2 from `user` where `user`.id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = 5) and u.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# nested subquery
-"select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id))",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.m from `user` as u where 1 != 1",
- "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(:user_extra_col, INT64(1))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Correlated subquery in where clause
-"select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# outer and inner subquery route by same int val
-"select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# outer and inner subquery route by same str val
-"select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# outer and inner subquery route by same val arg
-"select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Table": "`user`",
- "Values": [
- ":a"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)",
- "Table": "`user`",
- "Values": [
- ":a"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# unresolved symbol in inner subquery.
-"select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a and foo.id = 1)"
-"symbol foo.id not found"
-Gen4 plan same as above
-
-# outer and inner subquery route by same outermost column value
-"select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))"
-{
- "QueryType": "SELECT",
- "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` as uu where 1 != 1",
- "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` as uu where 1 != 1",
- "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# cross-shard subquery in IN clause.
-# Note the improved Underlying plan as SelectIN.
-"select id from user where id in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard subquery in NOT IN clause.
-"select id from user where id not in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id not in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id not in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard subquery in EXISTS clause.
-"select id from user where exists (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where exists (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where exists (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard subquery as expression
-"select id from user where id = (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multi-level pullout
-"select id1 from user where id = (select id2 from user where id2 in (select id3 from user))"
-{
- "QueryType": "SELECT",
- "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id3 from `user` where 1 != 1",
- "Query": "select id3 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` where 1 != 1",
- "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id1 from `user` where 1 != 1",
- "Query": "select id1 from `user` where id = :__sq2",
- "Table": "`user`",
- "Values": [
- ":__sq2"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id3 from `user` where 1 != 1",
- "Query": "select id3 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from `user` where 1 != 1",
- "Query": "select id2 from `user` where :__sq_has_values2 = 1 and id2 in ::__sq2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id1 from `user` where 1 != 1",
- "Query": "select id1 from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules subquery merge
-"select col from user where id = (select id from route1 where route1.id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 where route1.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules subquery pullout
-"select col from user where id = (select id from route2)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route2)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded as route2 where 1 != 1",
- "Query": "select id from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route2)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded as route2 where 1 != 1",
- "Query": "select id from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = :__sq1",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Case preservation test
-"select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1",
- "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from `user`, user_extra where 1 != 1",
- "Query": "select user_extra.Id from `user`, user_extra where `user`.Id = 5 and `user`.iD = user_extra.User_Id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# database() call in where clause.
-"select id from user where database()"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where database()",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where database()",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Select with equals null
-"select id from music where id = null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id = null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# SELECT with IS NULL
-"select id from music where id is null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# SELECT with IS NOT NULL
-"select id from music where id is not null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is not null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is not null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is not null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is not null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and null match
-"select id from music where user_id = 4 and id = null"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id = null",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id = null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id = null",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and IN (null)
-"select id from music where user_id = 4 and id IN (null)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null)",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null)",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and IN (null, 1, 2)
-"select id from music where user_id = 4 and id IN (null, 1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id in (null, 1, 2)",
- "Table": "music",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and NOT IN (null, 1, 2)
-"select id from music where user_id = 4 and id NOT IN (null, 1, 2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Single table with unique vindex match and NOT IN (null, 1, 2) predicates inverted
-"select id from music where id NOT IN (null, 1, 2) and user_id = 4"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id not in (null, 1, 2) and user_id = 4",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# pullout sq after pullout sq
-"select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
- "Table": "user_extra",
- "Values": [
- "INT64(42)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
- "Table": "user_extra",
- "Values": [
- "INT64(411)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and (:__sq_has_values2 = 0 or id not in ::__sq2)",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 411",
- "Table": "user_extra",
- "Values": [
- "INT64(411)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 42",
- "Table": "user_extra",
- "Values": [
- "INT64(42)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where (:__sq_has_values1 = 0 or id not in ::__sq1) and (:__sq_has_values2 = 1 and id in ::__vals)",
- "Table": "`user`",
- "Values": [
- ":__sq2"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# solving LIKE query with a CFC prefix vindex
-"select c2 from cfc_vindex_col where c1 like 'A%'"
-{
- "QueryType": "SELECT",
- "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
- "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Table": "cfc_vindex_col",
- "Values": [
- "VARCHAR(\"A%\")"
- ],
- "Vindex": "cfc"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1",
- "Query": "select c2 from cfc_vindex_col where c1 like 'A%'",
- "Table": "cfc_vindex_col",
- "Values": [
- "VARCHAR(\"A%\")"
- ],
- "Vindex": "cfc"
- },
- "TablesUsed": [
- "user.cfc_vindex_col"
- ]
-}
-
-"select * from samecolvin where col = :col"
-{
- "QueryType": "SELECT",
- "Original": "select * from samecolvin where col = :col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from samecolvin where 1 != 1",
- "Query": "select col from samecolvin where col = :col",
- "Table": "samecolvin",
- "Values": [
- ":col"
- ],
- "Vindex": "vindex1"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from samecolvin where col = :col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from samecolvin where 1 != 1",
- "Query": "select col from samecolvin where col = :col",
- "Table": "samecolvin",
- "Values": [
- ":col"
- ],
- "Vindex": "vindex1"
- },
- "TablesUsed": [
- "user.samecolvin"
- ]
-}
-
-# non unique predicate on vindex
-"select id from user where user.id \u003e 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id \u003e 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id \u003e 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id \u003e 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id \u003e 5",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select from unsharded keyspace with uncorrelated subquery which should be merged to a single route
-"select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)",
- "Table": "unsharded, unsharded_a"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a"
- ]
-}
-
-# in subquery the id will be scoped to local table as there is no qualifier associated with it.
-"select id from user where id in (select col from unsharded where col = id)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from unsharded where col = id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded where col = id",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select col from unsharded where col = id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded where col = id",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# correlated subquery with different keyspace tables involved
-"select id from user where id in (select col from unsharded where col = user.id)"
-"unsupported: cross-shard correlated subquery"
-Gen4 plan same as above
-
-# correlated subquery with same keyspace
-"select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# SelectDBA with uncorrelated subqueries
-"select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)"
-{
- "QueryType": "SELECT",
- "Original": "select t.table_schema from information_schema.tables as t where t.table_schema in (select c.column_name from information_schema.columns as c)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.table_schema from information_schema.`tables` as t where 1 != 1",
- "Query": "select t.table_schema from information_schema.`tables` as t where t.table_schema in (select c.column_name from information_schema.`columns` as c)",
- "Table": "information_schema.`tables`"
- }
-}
-Gen4 plan same as above
-
-# SelectReference with uncorrelated subqueries
-"select ref.col from ref where ref.col in (select ref.col from ref)"
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref where 1 != 1",
- "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref where 1 != 1",
- "Query": "select ref.col from ref where ref.col in (select ref.col from ref)",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# SelectEqualUnique with uncorrelated subqueries
-"select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# SelectEqualUnique with EXISTS uncorrelated subquery
-"select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# SelectEqualUnique with NOT EXISTS uncorrelated subquery
-"select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# SelectScatter with NOT EXISTS uncorrelated subquery
-"select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.col from `user` as u1 where not :__sq_has_values1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# The outer and first inner are SelectEqualUnique with same Vindex value, the second inner has different Vindex value
-"select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and :__sq_has_values1 = 1 and id in ::__sq1 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values2 = 1 and id in ::__sq2)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# The outer and second inner are SelectEqualUnique with same Vindex value, the first inner has different Vindex value
-"select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values1 = 0 or id not in ::__sq1)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutNotIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.user_id = 4",
- "Table": "user_extra",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5 and (:__sq_has_values1 = 0 or id not in ::__sq1) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# two correlated subqueries that can be merge in a single route
-"select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# transitive closures for the win
-"select id from user where user.id = user.col and user.col = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = user.col and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where user.id = user.col and user.col = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join with transitive closures
-"select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_id and user_extra.col = user_extra.user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user`, user_extra where 1 != 1",
- "Query": "select id from `user`, user_extra where user_extra.col = user_extra.user_id and `user`.id = user_extra.col",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# not supported transitive closures with equality inside of an OR
-"select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_id and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`)",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = :user_extra_col",
- "Table": "`user`",
- "Values": [
- ":user_extra_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# routing rules subquery merge with alias
-"select col from user where id = (select id from route1 as a where a.id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# left join where clauses where we can optimize into an inner join
-"select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.foobar = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.foobar = 5 and user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# this query lead to a nil pointer error
-"select user.id from user left join user_extra on user.col = user_extra.col where foo(user_extra.foobar)"
-"expr cannot be translated, not supported: foo(user_extra.foobar)"
-Gen4 plan same as above
-
-# filter after outer join
-"select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": "user_extra.id is null",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-#subquery on other table
-"select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')"
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where col2 = 'a'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), 1",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where col2 = 'a'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, 1 ASC",
- "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1 order by `user`.id asc, `user`.col asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# should use colb_colc_map as first column of the vindex is present in predicate
-"select * from multicolvin where column_b = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# should only use first column of the vindex colb_colc_map
-"select * from multicolvin where column_b = 1 and column_c = 2"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# uses vindex colb_colc_map
-"select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# v3 takes cola_map, gen4 takes colb_colc_map, may be based on map key ordering
-"select * from multicolvin where column_a = 3 and column_b = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "cola_map"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicolvin where 1 != 1",
- "Query": "select * from multicolvin where column_a = 3 and column_b = 1",
- "Table": "multicolvin",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "colb_colc_map"
- },
- "TablesUsed": [
- "user.multicolvin"
- ]
-}
-
-# multi column vindex produces Equal plan in gen4 and Scatter in v3
-"select * from multicol_tbl where cola = 1 and colb = 2"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb = 2",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with different order places the vindex keys in correct order
-"select * from multicol_tbl where colb = 2 and cola = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 2 and cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "INT64(2)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex produces IN plan in gen4 and Scatter in v3
-"select * from multicol_tbl where cola in (1,2) and colb in (3,4)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in (1, 2) and colb in (3, 4)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in ::__vals0 and colb in ::__vals1",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))",
- "(INT64(3), INT64(4))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with different order places the vindex keys in correct order in IN plan in gen4
-"select * from multicol_tbl where colb in (3,4) and cola in (1,2)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (3, 4) and cola in (1, 2)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in ::__vals1 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(2))",
- "(INT64(3), INT64(4))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with different order with one IN predicate and one equality
-"select * from multicol_tbl where colb = 1 and cola in (3,4)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 1 and cola in (3, 4)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 1 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(3), INT64(4))",
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with both IN predicate and equality predicate
-"select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(4)",
- "INT64(7)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with one column with equal followed by IN predicate, ordering matters for now
-"select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 4 and colb in (1, 10) and cola in (5, 6)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb = 4 and colb in ::__vals1 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(5), INT64(6))",
- "(INT64(1), INT64(10))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with one column with IN followed by equal predicate, ordering matters for now
-"select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in (5, 6)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in ::__vals0",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(5), INT64(6))",
- "INT64(4)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex with better plan selection
-"select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(5)",
- "INT64(6)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex as tuple
-"select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "MultiEqual",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))",
- "Table": "multicol_tbl",
- "Values": [
- "(INT64(1), INT64(3))",
- "(INT64(2), INT64(4))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex, partial vindex with SelectEqual
-"select * from multicol_tbl where cola = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "SubShard",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN
-"select * from multicol_tbl where cola = 1 and colb in (2,3)"
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb in (2, 3)",
- "Table": "multicol_tbl"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from multicol_tbl where 1 != 1",
- "Query": "select * from multicol_tbl where cola = 1 and colb in ::__vals1",
- "Table": "multicol_tbl",
- "Values": [
- "INT64(1)",
- "(INT64(2), INT64(3))"
- ],
- "Vindex": "multicolIdx"
- },
- "TablesUsed": [
- "user.multicol_tbl"
- ]
-}
-
-# left join with where clause - should be handled by gen4 but still isn't
-"select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5"
-{
- "QueryType": "SELECT",
- "Original": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where 1 != 1",
- "Query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-Gen4 plan same as above
-
-# filter on outer join should not be used for routing
-"select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user_extra left outer join user on user_extra.user_id = user.id WHERE user.id IS NULL",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where 1 != 1",
- "Query": "select `user`.col from user_extra left join `user` on user_extra.user_id = `user`.id where `user`.id is null",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# conditions following a null safe comparison operator can be used for routing
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id <=> NULL AND music.user_id = 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE user.id \u003c=\u003e NULL AND music.user_id = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 10 and `user`.id \u003c=\u003e null",
- "Table": "`user`, music",
- "Values": [
- "INT64(10)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, where conditions using both sides of the join are not pulled into the join conditions
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE (user.name = 'Trent Reznor' OR music.genre = 'pop') AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, where conditions using both sides of the join are not pulled into the join conditions (swapped order)
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND (user.name = 'Trent Reznor' OR music.genre = 'pop')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music left join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music left join `user` on music.user_id = `user`.id where music.user_id = 5 and (`user`.`name` = 'Trent Reznor' or music.genre = 'pop')",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, null intolerant where conditions using both sides of the join are transformed to inner joins
-"SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.componist = user.name",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id = 5 and music.user_id = `user`.id and music.componist = `user`.`name`",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# For left joins, null intolerant where conditions using `IS NOT NULL` allow outer join simplification
-"SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music LEFT OUTER JOIN user ON user.id = music.user_id WHERE music.user_id = 5 AND user.id IS NOT NULL",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id = 5 and `user`.id is not null and `user`.id = music.user_id",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# optimize ORs to IN route op codes #1
-"select col from user where id = 1 or id = 2"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# optimize ORs to IN route op codes #2
-"select col from user where id = 1 or id = 2 or id = 3"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2 or id = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 or id = 2 or id = 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or id = 3",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# optimize ORs to IN route op codes #3
-"select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)",
- "Table": "`user`",
- "Values": [
- "(INT64(1), INT64(2), INT64(3), INT64(4))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Don't pick a vindex for an IS NULL predicate if it's a lookup vindex
-"select id from music where id is null and user_id in (1,2)"
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null and user_id in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null and user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from music where id is null and user_id in (1,2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music where id is null and user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases.json b/go/vt/vtgate/planbuilder/testdata/flush_cases.json
new file mode 100644
index 00000000000..1631cc364a4
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/flush_cases.json
@@ -0,0 +1,57 @@
+[
+ {
+ "comment": "Flush statement",
+ "query": "flush tables unsharded, music",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush tables unsharded, music",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush tables unsharded, music"
+ },
+ "TablesUsed": [
+ "main.music",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Flush statement with no tables",
+ "query": "flush local tables with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables with read lock",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables with read lock"
+ }
+ }
+ },
+ {
+ "comment": "Flush statement with flush options",
+ "query": "flush no_write_to_binlog hosts, logs",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush no_write_to_binlog hosts, logs",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local hosts, logs"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases.txt b/go/vt/vtgate/planbuilder/testdata/flush_cases.txt
deleted file mode 100644
index c94e4316c2e..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/flush_cases.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-# Flush statement
-"flush tables unsharded, music"
-{
- "QueryType": "FLUSH",
- "Original": "flush tables unsharded, music",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush tables unsharded, music"
- },
- "TablesUsed": [
- "main.music",
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Flush statement with no tables
-"flush local tables with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables with read lock",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables with read lock"
- }
-}
-Gen4 plan same as above
-
-# Flush statement with flush options
-"flush no_write_to_binlog hosts, logs"
-{
- "QueryType": "FLUSH",
- "Original": "flush no_write_to_binlog hosts, logs",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local hosts, logs"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json
new file mode 100644
index 00000000000..f2629ccfd67
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json
@@ -0,0 +1,149 @@
+[
+ {
+ "comment": "Flush statement",
+ "query": "flush local tables user, unsharded_a, user_extra with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables user, unsharded_a, user_extra with read lock",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded_a with read lock"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables `user`, user_extra with read lock"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Flush statement with flush options",
+ "query": "flush no_write_to_binlog hosts, logs",
+ "plan": "keyspace not specified"
+ },
+ {
+ "comment": "Flush statement with routing rules",
+ "query": "flush local tables route1, route2",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables route1, route2",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables `user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Incorrect tables in flush",
+ "query": "flush tables user.a with read lock",
+ "plan": "table a not found"
+ },
+ {
+ "comment": "Unknown tables in unsharded keyspaces are allowed",
+ "query": "flush tables main.a with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush tables main.a with read lock",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush tables a with read lock"
+ },
+ "TablesUsed": [
+ "main.a"
+ ]
+ }
+ },
+ {
+ "comment": "Flush statement with 3 keyspaces",
+ "query": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock",
+ "plan": {
+ "QueryType": "FLUSH",
+ "Original": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded_a with read lock"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables unsharded_tab with read lock"
+ },
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "flush local tables `user`, user_extra with read lock"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main_2.unsharded_tab",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt
deleted file mode 100644
index 3bc63561179..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-# Flush statement
-"flush local tables user, unsharded_a, user_extra with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables user, unsharded_a, user_extra with read lock",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded_a with read lock"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables `user`, user_extra with read lock"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Flush statement with flush options
-"flush no_write_to_binlog hosts, logs"
-"keyspace not specified"
-Gen4 plan same as above
-
-# Flush statement with routing rules
-"flush local tables route1, route2"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables route1, route2",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables `user`"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Incorrect tables in flush
-"flush tables user.a with read lock"
-"table a not found"
-Gen4 plan same as above
-
-# Unknown tables in unsharded keyspaces are allowed
-"flush tables main.a with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush tables main.a with read lock",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush tables a with read lock"
- },
- "TablesUsed": [
- "main.a"
- ]
-}
-Gen4 plan same as above
-
-# Flush statement with 3 keyspaces
-"flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock"
-{
- "QueryType": "FLUSH",
- "Original": "flush local tables user, unsharded_a, user_extra, unsharded_tab with read lock",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded_a with read lock"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables unsharded_tab with read lock"
- },
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "flush local tables `user`, user_extra with read lock"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main_2.unsharded_tab",
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json
new file mode 100644
index 00000000000..71e20866a07
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json
@@ -0,0 +1,6454 @@
+[
+ {
+ "comment": "Single table sharded scatter",
+ "query": "select col from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Single table unsharded",
+ "query": "select col from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Select from sequence",
+ "query": "select next 2 values from seq",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select next 2 values from seq",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 2 values from seq where 1 != 1",
+ "Query": "select next 2 values from seq",
+ "Table": "seq"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select next 2 values from seq",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 2 values from seq where 1 != 1",
+ "Query": "select next 2 values from seq",
+ "Table": "seq"
+ },
+ "TablesUsed": [
+ "main.seq"
+ ]
+ }
+ },
+ {
+ "comment": "select next from non-sequence table",
+ "query": "select next value from user",
+ "plan": "NEXT used on a non-sequence table"
+ },
+ {
+ "comment": "select next in derived table",
+ "query": "select 1 from (select next value from seq) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select next value from seq) t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
+ "Query": "select 1 from (select next 1 values from seq) as t",
+ "Table": "seq"
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "select next in derived table",
+ "query": "select * from (select next value from seq) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select next value from seq) t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
+ "Query": "select * from (select next 1 values from seq) as t",
+ "Table": "seq"
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "select next in subquery",
+ "query": "select 1 from user where id in (select next value from seq)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user where id in (select next value from seq)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 1 values from seq where 1 != 1",
+ "Query": "select next 1 values from seq",
+ "Table": "seq"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "select next in projection",
+ "query": "select (select next value from seq) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select next value from seq) from user",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Next",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select next 1 values from seq where 1 != 1",
+ "Query": "select next 1 values from seq",
+ "Table": "seq"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from `user` where 1 != 1",
+ "Query": "select :__sq1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "Incorrect usage/placement of 'NEXT'"
+ },
+ {
+ "comment": "Select from reference",
+ "query": "select * from ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from ref where 1 != 1",
+ "Query": "select * from ref",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from ref where 1 != 1",
+ "Query": "select * from ref",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table unsharded",
+ "query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Multi-table, multi-chunk",
+ "query": "select music.col from user join music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user join music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user join music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name matches, and there's no alias.",
+ "query": "select * from second_user.user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name matches, and there's an alias.",
+ "query": "select * from second_user.user as a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from second_user.user as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name does not match, and there's no alias.",
+ "query": "select * from route1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as route1 where 1 != 1",
+ "Query": "select * from `user` as route1",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as route1 where 1 != 1",
+ "Query": "select * from `user` as route1",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules where table name does not match, and there's an alias.",
+ "query": "select * from route1 as a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1 as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from route1 as a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as a where 1 != 1",
+ "Query": "select * from `user` as a",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules with primary targeting",
+ "query": "select * from primary_redirect",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from primary_redirect",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
+ "Query": "select * from `user` as primary_redirect",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from primary_redirect",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
+ "Query": "select * from `user` as primary_redirect",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules bad table",
+ "query": "select * from bad_table",
+ "plan": "Unknown database 'noks' in vschema"
+ },
+ {
+ "comment": "routing rules disabled table",
+ "query": "select * from disabled",
+ "plan": "table disabled has been disabled"
+ },
+ {
+ "comment": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "query": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where 1 != 1",
+ "Query": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where foo.col = 42",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select foo.col from `user` as foo, `user` where 1 != 1",
+ "Query": "select foo.col from `user` as foo, `user` where foo.col = 42 and foo.id = `user`.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "query": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "music_id": 1
+ },
+ "TableName": "music_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.foo, music.id from music where 1 != 1",
+ "Query": "select music.foo, music.id from music where music.col = 42",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :music_id",
+ "Table": "`user`",
+ "Values": [
+ ":music_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "music_id": 0
+ },
+ "TableName": "music_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id, music.foo from music where 1 != 1",
+ "Query": "select music.id, music.foo from music where music.col = 42",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :music_id",
+ "Table": "`user`",
+ "Values": [
+ ":music_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "',' join",
+ "query": "select music.col from user, music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user, music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select music.col from user, music",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col from music where 1 != 1",
+ "Query": "select music.col from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "',' join unsharded",
+ "query": "select u1.a, u2.a from unsharded u1, unsharded u2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "',' 3-way join unsharded",
+ "query": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
+ "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Left join, single chunk",
+ "query": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Left join, multi-chunk",
+ "query": "select u.col from user u left join unsharded m on u.a = m.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col from user u left join unsharded m on u.a = m.b",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u_a": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.col from `user` as u where 1 != 1",
+ "Query": "select u.a, u.col from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m where 1 != 1",
+ "Query": "select 1 from unsharded as m where m.b = :u_a",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Three-way left join",
+ "query": "select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "m1_col": 0
+ },
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 where m1.col = :user_col",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m2.foo from unsharded as m2 where 1 != 1",
+ "Query": "select m2.foo from unsharded as m2 where m2.col = :m1_col",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Three-way left join, right-associated",
+ "query": "select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinVars": {
+ "e_col": 0
+ },
+ "TableName": "user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.col from user_extra as e where 1 != 1",
+ "Query": "select e.col from user_extra as e where e.col = :user_col",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
+ "Query": "select 1 from unsharded as m1 where m1.col = :e_col",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Right join",
+ "query": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Right join with a join LHS",
+ "query": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Straight-join (Gen4 ignores the straight_join hint)",
+ "query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
+ "Query": "select m1.col from unsharded as m1 join unsharded as m2",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Three-way join",
+ "query": "select user.col from user join unsharded as m1 join unsharded as m2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
+ "Query": "select 1 from unsharded as m1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m2 where 1 != 1",
+ "Query": "select 1 from unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
+ "Query": "select 1 from unsharded as m1, unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Parenthesized, single chunk",
+ "query": "select user.col from user join (unsharded as m1 join unsharded as m2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (unsharded as m1 join unsharded as m2) where 1 != 1",
+ "Query": "select 1 from (unsharded as m1 join unsharded as m2)",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
+ "Query": "select 1 from unsharded as m1, unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Parenthesized, multi-chunk",
+ "query": "select user.col from user join (user as u1 join unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (user as u1 join unsharded)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
+ "Query": "select 1 from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join (user as u1 join unsharded)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
+ "Query": "select 1 from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "index hints, make sure they are not stripped.",
+ "query": "select user.col from user use index(a)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple index hints, make sure they are not stripped.",
+ "query": "select user.col from user use index(a) use index for group by (b)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a) use index for group by (b)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user use index(a) use index for group by (b)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
+ "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex",
+ "query": "select user.col from user join user_extra on user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex (parenthesized ON clause)",
+ "query": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex, with a stray condition",
+ "query": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex, swapped operands",
+ "query": "select user.col from user join user_extra on user_extra.user_id = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where user_extra.user_id = `user`.id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable sharded join on unique vindex, and condition",
+ "query": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join on unique vindex, inequality",
+ "query": "select user.col from user join user_extra on user.id < user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id < user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where :user_id < user_extra.user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id < user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where :user_id < user_extra.user_id",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-col reference RHS",
+ "query": "select user.col from user join user_extra on user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = 5",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-col reference LHS",
+ "query": "select user.col from user join user_extra on 5 = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on 5 = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on 5 = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-vindex col",
+ "query": "select user.col from user join user_extra on user.id = user_extra.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.id = :user_extra_col",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "sharded join, non-unique vindex",
+ "query": "select user.col from user_extra join user on user_extra.user_id = user.name",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_user_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
+ "Query": "select user_extra.user_id from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_user_id"
+ ],
+ "Vindex": "name_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "user_name": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.`name`, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.`name`, `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.user_id = :user_name",
+ "Table": "user_extra",
+ "Values": [
+ ":user_name"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join with reference table",
+ "query": "select user.col from user join ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join ref where 1 != 1",
+ "Query": "select `user`.col from `user` join ref",
+ "Table": "`user`, ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, ref where 1 != 1",
+ "Query": "select `user`.col from `user`, ref",
+ "Table": "`user`, ref"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "reference table self-join",
+ "query": "select r1.col from ref r1 join ref",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select r1.col from ref r1 join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select r1.col from ref as r1 join ref where 1 != 1",
+ "Query": "select r1.col from ref as r1 join ref",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select r1.col from ref r1 join ref",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select r1.col from ref as r1, ref where 1 != 1",
+ "Query": "select r1.col from ref as r1, ref",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "reference table can merge with other opcodes left to right.",
+ "query": "select ref.col from ref join user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref join `user` where 1 != 1",
+ "Query": "select ref.col from ref join `user`",
+ "Table": "`user`, ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref, `user` where 1 != 1",
+ "Query": "select ref.col from ref, `user`",
+ "Table": "`user`, ref"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "reference table can merge with other opcodes left to right and vindex value is in the plan.\n# This tests that route.Merge also copies the condition to the LHS.",
+ "query": "select ref.col from ref join (select aa from user where user.id=1) user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1",
+ "Query": "select ref.col from ref join (select aa from `user` where `user`.id = 1) as `user`",
+ "Table": "`user`, ref",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ref.col from ref, (select aa from `user` where 1 != 1) as `user` where 1 != 1",
+ "Query": "select ref.col from ref, (select aa from `user` where `user`.id = 1) as `user`",
+ "Table": "`user`, ref",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for join, unsharded route wins if we can't find a merged route",
+ "query": "select route2.col from route2 join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select route2.col from route2 join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
+ "Query": "select route2.col from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select route2.col from route2 join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
+ "Query": "select route2.col from unsharded as route2",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table",
+ "query": "select id from (select id, col from user where id = 5) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from user where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from user where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join",
+ "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t, user_extra where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t, user_extra where t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join, and aliased references",
+ "query": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t, user_extra where 1 != 1",
+ "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t, user_extra where t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join, duplicate columns",
+ "query": "select t.id from (select user.id, id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
+ "v3-plan": "duplicate column aliases: id",
+ "gen4-plan": "Duplicate column name 'id'"
+ },
+ {
+ "comment": "derived table in RHS of join",
+ "query": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id",
+ "Table": "user_extra, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from user_extra, (select id from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select t.id from user_extra, (select id from `user` where id = 5) as t where t.id = user_extra.user_id",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table in FROM with cross-shard join",
+ "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :t_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
+ "Query": "select t.id from (select id from `user` where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :t_id",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for derived table",
+ "query": "select id from (select id, col from route1 where id = 5) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1 where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1 where id = 5) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table missing columns",
+ "query": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
+ "Query": "select t.id from (select id from `user`) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": "symbol t.col not found"
+ },
+ {
+ "comment": "routing rules for derived table where the constraint is in the outer query",
+ "query": "select id from (select id, col from route1) as t where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1) as t where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id, col from route1) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules for derived table where the constraint is in the outer query",
+ "query": "select id from (select id+col as foo from route1) as t where foo = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id+col as foo from route1) as t where foo = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id + col as foo from `user` as route1 where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id + col as foo from `user` as route1) as t where foo = 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": "symbol id not found"
+ },
+ {
+ "comment": "push predicate on joined derived tables",
+ "query": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t join (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s on t.id = s.id where 1 != 1",
+ "Query": "select t.id from (select id, textcol1 as baz from `user` as route1) as t join (select id, textcol1 + textcol1 as baz from `user`) as s on t.id = s.id where t.baz = '3' and s.baz = '3'",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t, (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s where 1 != 1",
+ "Query": "select t.id from (select id, textcol1 as baz from `user` as route1 where textcol1 = '3') as t, (select id, textcol1 + textcol1 as baz from `user` where textcol1 + textcol1 = '3') as s where t.id = s.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "recursive derived table predicate push down",
+ "query": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user`) as u) as t where bar = 5",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where colA + colB + 4 = 5) as u) as t",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "recursive derived table lookups",
+ "query": "select id from (select id from (select id from user) as u) as t where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id from (select id from `user`) as u) as t where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
+ "Query": "select id from (select id from (select id from `user` where id = 5) as u) as t",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "merge derived tables with single-shard routes",
+ "query": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1",
+ "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u, (select col from user_extra where 1 != 1) as e where 1 != 1",
+ "Query": "select u.col, e.col from (select col from `user` where id = 5) as u, (select col from user_extra where user_id = 5) as e",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join of information_schema with normal table",
+ "query": "select unsharded.foo from information_schema.a join unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from information_schema.a join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "information_schema.a_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.a where 1 != 1",
+ "Query": "select 1 from information_schema.a",
+ "Table": "information_schema.a"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from information_schema.a join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "information_schema.a_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.a where 1 != 1",
+ "Query": "select 1 from information_schema.a",
+ "Table": "information_schema.a"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "join of normal table with information_schema",
+ "query": "select unsharded.foo from unsharded join information_schema.a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from unsharded join information_schema.a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_information_schema.a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.a where 1 != 1",
+ "Query": "select 1 from information_schema.a",
+ "Table": "information_schema.a"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.foo from unsharded join information_schema.a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_information_schema.a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
+ "Query": "select unsharded.foo from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from information_schema.a where 1 != 1",
+ "Query": "select 1 from information_schema.a",
+ "Table": "information_schema.a"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up on join with cross-shard derived table",
+ "query": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_col1": 0,
+ "t_id": 1
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t_col1": 0,
+ "t_id": 1
+ },
+ "TableName": "`user`_user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up within cross-shard derived table",
+ "query": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "user_col": 2
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1, `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Join with cross-shard derived table on rhs",
+ "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "unsharded_a_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
+ "Query": "select 1 from unsharded_a as ua",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "unsharded_a_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
+ "Query": "select 1 from unsharded_a as ua",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Join with cross-shard derived table on rhs - push down join predicate to derived table",
+ "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "ua_id": 0
+ },
+ "TableName": "unsharded_a_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select ua.id from unsharded_a as ua where 1 != 1",
+ "Query": "select ua.id from unsharded_a as ua",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id",
+ "Table": "`user`",
+ "Values": [
+ ":ua_id"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, single route",
+ "query": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1 where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause as sub-expression",
+ "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1 where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "IN subquery in ON clause, single route",
+ "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1 where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
+ "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, with join primitives",
+ "query": "select unsharded.col from unsharded join user on user.col in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, with left join primitives\n# The subquery is not pulled all the way out.",
+ "query": "select unsharded.col from unsharded left join user on user.col in (select col from user)",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded left join user on user.col in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "subquery in ON clause, with join primitives, and join on top\n# The subquery is not pulled all the way out.",
+ "query": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`_unsharded_a",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
+ "Query": "select unsharded.col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a where 1 != 1",
+ "Query": "select 1 from unsharded_a",
+ "Table": "unsharded_a"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_unsharded, unsharded_a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col from unsharded, unsharded_a where 1 != 1",
+ "Query": "select unsharded.col from unsharded, unsharded_a",
+ "Table": "unsharded, unsharded_a"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "keyspace-qualified queries",
+ "query": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_col2": 1
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col1, `user`.col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
+ "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "user_col2": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col2, `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col2, `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
+ "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "implicit table reference for unsharded keyspace",
+ "query": "select main.foo.col from main.foo",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select main.foo.col from main.foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select foo.col from foo where 1 != 1",
+ "Query": "select foo.col from foo",
+ "Table": "foo"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select main.foo.col from main.foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select foo.col from foo where 1 != 1",
+ "Query": "select foo.col from foo",
+ "Table": "foo"
+ },
+ "TablesUsed": [
+ "main.foo"
+ ]
+ }
+ },
+ {
+ "comment": "col refs should be case-insensitive",
+ "query": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1",
+ "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
+ "Query": "select `user`.col from `user`, user_extra where `user`.ID = user_extra.User_Id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with join primitive (FROM)",
+ "query": "select id, t.id from (select user.id from user join user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, t.id from (select user.id from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, t.id from (select user.id from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "database call in ON clause.\n# The on clause is weird because the substitution must happen even for root expressions.",
+ "query": "select u1.a from unsharded u1 join unsharded u2 on database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
+ "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
+ "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for dual",
+ "query": "select last_insert_id()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id()",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__lastInsertId as last_insert_id()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id()",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__lastInsertId as last_insert_id()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for sharded keyspace",
+ "query": "select last_insert_id() from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for unsharded route",
+ "query": "select last_insert_id() from main.unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "join with bindvariables",
+ "query": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.assembly_id = :user_id and user_extra.user_id = 2",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_assembly_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.assembly_id from user_extra where 1 != 1",
+ "Query": "select user_extra.assembly_id from user_extra where user_extra.user_id = 2",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user` where `user`.id = :user_extra_assembly_id",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_assembly_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "verify ',' vs JOIN precedence",
+ "query": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a",
+ "v3-plan": "symbol u1.a not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u1.a from unsharded as u1, unsharded as u2 join unsharded as u3 on u1.a = u2.a where 1 != 1",
+ "Query": "select u1.a from unsharded as u1, unsharded as u2 join unsharded as u3 on u1.a = u2.a",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "first expression fails for ',' join (code coverage: ensure error is returned)",
+ "query": "select user.foo.col from user.foo, user",
+ "plan": "table foo not found"
+ },
+ {
+ "comment": "table names should be case-sensitive",
+ "query": "select unsharded.id from unsharded where Unsharded.val = 1",
+ "v3-plan": "symbol Unsharded.val not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.id from unsharded where Unsharded.val = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where Unsharded.val = 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "implicit table reference for sharded keyspace",
+ "query": "select user.foo.col from user.foo",
+ "plan": "table foo not found"
+ },
+ {
+ "comment": "duplicate symbols",
+ "query": "select user.id from user join user",
+ "plan": "Not unique table/alias: 'user'"
+ },
+ {
+ "comment": "duplicate symbols for merging routes",
+ "query": "select user.id from user join user_extra user on user.id = user.user_id",
+ "plan": "Not unique table/alias: 'user'"
+ },
+ {
+ "comment": "non-existent table",
+ "query": "select c from t",
+ "plan": "table t not found"
+ },
+ {
+ "comment": "non-existent table on left of join",
+ "query": "select c from t join user",
+ "plan": "table t not found"
+ },
+ {
+ "comment": "non-existent table on right of join",
+ "query": "select c from user join t",
+ "plan": "table t not found"
+ },
+ {
+ "comment": "query with parens is planned correctly",
+ "query": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
+ "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
+ "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "gen4 - optimise plan by merging user_extra and music first, and then querying for user info",
+ "query": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "ue_user_id": 1
+ },
+ "TableName": "`user`_user_extra_music",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, u.id from `user` as u where 1 != 1",
+ "Query": "select 1, u.id from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.user_id from user_extra as ue where 1 != 1",
+ "Query": "select ue.user_id from user_extra as ue where ue.id = :u_id",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.user_id = :ue_user_id",
+ "Table": "music",
+ "Values": [
+ ":ue_user_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "music, user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, 1 from user_extra as ue, music as m where 1 != 1",
+ "Query": "select ue.id, 1 from user_extra as ue, music as m where m.user_id = ue.user_id",
+ "Table": "music, user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u where 1 != 1",
+ "Query": "select 1 from `user` as u where u.id = :ue_id",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join column selected as alias",
+ "query": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1",
+ "Query": "select ue.id as ueid from user_extra as ue where ue.id = :u_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "ue_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.id, ue.id as ueid from user_extra as ue where 1 != 1",
+ "Query": "select ue.id, ue.id as ueid from user_extra as ue",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u where u.id = :ue_id",
+ "Table": "`user`",
+ "Values": [
+ ":ue_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "alias on column from derived table. TODO: to support alias in SimpleProjection engine primitive.",
+ "query": "select a as k from (select count(*) as a from user) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a as k from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a as k from (select count(*) as a from user) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select star from derived table on expandable and unsharded table",
+ "query": "select u.* from (select * from unsharded) u",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.* from (select * from unsharded) u",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
+ "Query": "select u.* from (select * from unsharded) as u",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.* from (select * from unsharded) u",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
+ "Query": "select u.* from (select * from unsharded) as u",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "filtering on a cross-shard derived table",
+ "query": "select id from (select user.id, user.col from user join user_extra) as t where id=5",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select user.id, user.col from user join user_extra) as t where id=5",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col from `user` where `user`.id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "expression on a cross-shard derived table",
+ "query": "select id+1 from (select user.id, user.col from user join user_extra) as t",
+ "v3-plan": "unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id+1 from (select user.id, user.col from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, `user`.id + 1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col, `user`.id + 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with aliased columns and outer predicate pushed in derived table",
+ "query": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "v3-plan": "unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
+ "Query": "select u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with aliased columns predicate in both the outer and inner",
+ "query": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1",
+ "v3-plan": "unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
+ "Query": "select u.a from (select id as b, `name` from `user` where b = 1 and `name` = 1) as u(a, n)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with aliased columns and a join that requires pushProjection",
+ "query": "select i+1 from (select user.id from user join user_extra) t(i)",
+ "v3-plan": "unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select i+1 from (select user.id from user join user_extra) t(i)",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.id + 1 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.id + 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "two subqueries with different Select and OpCode",
+ "query": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from user_extra where 1 != 1",
+ "Query": "select id from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where col = :__sq1 and :__sq_has_values2 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq2"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values2",
+ "__sq2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from user_extra where 1 != 1",
+ "Query": "select id from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and col = :__sq2",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "join on int columns",
+ "query": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u_intcol": 1
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.intcol from `user` as u where 1 != 1",
+ "Query": "select u.id, u.intcol from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as uu where 1 != 1",
+ "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u_intcol": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.intcol, u.id from `user` as u where 1 != 1",
+ "Query": "select u.intcol, u.id from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as uu where 1 != 1",
+ "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Duplicate output column from derived table having a join",
+ "query": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1",
+ "v3-plan": "unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "t_col1": 0
+ },
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1, 0 from `user` where 1 != 1",
+ "Query": "select `user`.col1, 0 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.a = :t_col1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "left join where clauses #2",
+ "query": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "coalesce(user_extra.col, 4) = 5",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.col from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "don't merge unsharded tables from different keyspaces",
+ "query": "select 1 from main.unsharded join main_2.unsharded_tab",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_unsharded_tab",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
+ "Query": "select 1 from unsharded_tab",
+ "Table": "unsharded_tab"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "unsharded_unsharded_tab",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main_2",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
+ "Query": "select 1 from unsharded_tab",
+ "Table": "unsharded_tab"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main_2.unsharded_tab"
+ ]
+ }
+ },
+ {
+ "comment": "Unsharded join with using",
+ "query": "select * from unsharded_a join unsharded_b using (propertyId);",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
+ "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
+ "Table": "unsharded_a, unsharded_b"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
+ "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
+ "Table": "unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "Column aliases in Derived Table",
+ "query": "select id2 from (select id from user) as x (id2)",
+ "v3-plan": "unsupported: column aliases in derived table",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id2 from (select id from user) as x (id2)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id2 from (select id from `user` where 1 != 1) as x(id2) where 1 != 1",
+ "Query": "select id2 from (select id from `user`) as x(id2)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "single unsharded keyspace with derived table",
+ "query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
+ "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
+ "Table": "unsharded, unsharded_b, unsharded_a"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
+ "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
+ "Table": "unsharded, unsharded_a, unsharded_b"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "main.unsharded_b"
+ ]
+ }
+ },
+ {
+ "comment": "query builder with derived table having join inside it",
+ "query": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join with expressions",
+ "query": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join with expressions, with three-way join (different code path)",
+ "query": "select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as e where 1 != 1",
+ "Query": "select 1 from user_extra as e",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "left join with expressions coming from both sides",
+ "query": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0,
+ "user_foo": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.foo from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.foo from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_foo + user_extra.col + 1 from user_extra where 1 != 1",
+ "Query": "select :user_foo + user_extra.col + 1 from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Do not rewrite derived expressions when the derived table is merged with the outer",
+ "query": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*)",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join with USING construct",
+ "query": "select * from authoritative join unsharded_authoritative using(col1)",
+ "v3-plan": "unsupported: join with USING(column_list) clause for complex queries",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative join unsharded_authoritative using(col1)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:3,R:0",
+ "JoinVars": {
+ "authoritative_col1": 0
+ },
+ "TableName": "authoritative_unsharded_authoritative",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1",
+ "Query": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative",
+ "Table": "authoritative"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where 1 != 1",
+ "Query": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where unsharded_authoritative.col1 = :authoritative_col1",
+ "Table": "unsharded_authoritative"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded_authoritative",
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
+ "Query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user`) as t1) as t2) as t3 where push_it = 12",
+ "Table": "`user`",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
+ "Query": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where id = 12) as t1) as t2) as t3",
+ "Table": "`user`",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "missing and ambiguous column info is OK as long as we can send the query to a single unsharded keyspace",
+ "query": "select missing_column from unsharded, unsharded_a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select missing_column from unsharded, unsharded_a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select missing_column from unsharded, unsharded_a where 1 != 1",
+ "Query": "select missing_column from unsharded, unsharded_a",
+ "Table": "unsharded, unsharded_a"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select missing_column from unsharded, unsharded_a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select missing_column from unsharded, unsharded_a where 1 != 1",
+ "Query": "select missing_column from unsharded, unsharded_a",
+ "Table": "unsharded, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "missing and ambiguous column info is not valid when we have two different unsharded keyspaces in the query",
+ "query": "select missing_column from unsharded, unsharded_tab",
+ "v3-plan": "symbol missing_column not found",
+ "gen4-plan": "Column 'missing_column' in field list is ambiguous"
+ },
+ {
+ "comment": "join predicate only depending on the RHS should not turn outer join into inner join",
+ "query": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "t1_t1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.id1 from t1 where 1 != 1",
+ "Query": "select t1.id1 from t1",
+ "Table": "t1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t2.id1 from t1 as t2 where 1 != 1",
+ "Query": "select t2.id1 from t1 as t2 where t2.id1 = t2.id2",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "t1_t1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t1.id1 from t1 where 1 != 1",
+ "Query": "select t1.id1 from t1",
+ "Table": "t1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select t2.id1 from t1 as t2 where 1 != 1",
+ "Query": "select t2.id1 from t1 as t2 where t2.id1 = t2.id2",
+ "Table": "t1"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "zlookup_unique.t1"
+ ]
+ }
+ },
+ {
+ "comment": "join query using table with multicolumn vindex",
+ "query": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "m1_cola": 1
+ },
+ "TableName": "multicol_tbl_multicol_tbl",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, m1.cola from multicol_tbl as m1 where 1 != 1",
+ "Query": "select 1, m1.cola from multicol_tbl as m1",
+ "Table": "multicol_tbl"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from multicol_tbl as m2 where 1 != 1",
+ "Query": "select 1 from multicol_tbl as m2 where m2.cola = :m1_cola",
+ "Table": "multicol_tbl"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "m1_cola": 0
+ },
+ "TableName": "multicol_tbl_multicol_tbl",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select m1.cola, 1 from multicol_tbl as m1 where 1 != 1",
+ "Query": "select m1.cola, 1 from multicol_tbl as m1",
+ "Table": "multicol_tbl"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "SubShard",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from multicol_tbl as m2 where 1 != 1",
+ "Query": "select 1 from multicol_tbl as m2 where m2.cola = :m1_cola",
+ "Table": "multicol_tbl",
+ "Values": [
+ ":m1_cola"
+ ],
+ "Vindex": "multicolIdx"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.multicol_tbl"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.txt b/go/vt/vtgate/planbuilder/testdata/from_cases.txt
deleted file mode 100644
index 59c42783f31..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/from_cases.txt
+++ /dev/null
@@ -1,6112 +0,0 @@
-# Single table sharded scatter
-"select col from user"
-{
- "QueryType": "SELECT",
- "Original": "select col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Single table unsharded
-"select col from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Select from sequence
-"select next 2 values from seq"
-{
- "QueryType": "SELECT",
- "Original": "select next 2 values from seq",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 2 values from seq where 1 != 1",
- "Query": "select next 2 values from seq",
- "Table": "seq"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select next 2 values from seq",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 2 values from seq where 1 != 1",
- "Query": "select next 2 values from seq",
- "Table": "seq"
- },
- "TablesUsed": [
- "main.seq"
- ]
-}
-
-# select next from non-sequence table
-"select next value from user"
-"NEXT used on a non-sequence table"
-Gen4 plan same as above
-
-# select next in derived table
-"select 1 from (select next value from seq) t"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select next value from seq) t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
- "Query": "select 1 from (select next 1 values from seq) as t",
- "Table": "seq"
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# select next in derived table
-"select * from (select next value from seq) t"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select next value from seq) t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select next 1 values from seq where 1 != 1) as t where 1 != 1",
- "Query": "select * from (select next 1 values from seq) as t",
- "Table": "seq"
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# select next in subquery
-"select 1 from user where id in (select next value from seq)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user where id in (select next value from seq)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 1 values from seq where 1 != 1",
- "Query": "select next 1 values from seq",
- "Table": "seq"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# select next in projection
-"select (select next value from seq) from user"
-{
- "QueryType": "SELECT",
- "Original": "select (select next value from seq) from user",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Next",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select next 1 values from seq where 1 != 1",
- "Query": "select next 1 values from seq",
- "Table": "seq"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from `user` where 1 != 1",
- "Query": "select :__sq1 from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-Gen4 error: Incorrect usage/placement of 'NEXT'
-
-# Select from reference
-"select * from ref"
-{
- "QueryType": "SELECT",
- "Original": "select * from ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from ref where 1 != 1",
- "Query": "select * from ref",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from ref where 1 != 1",
- "Query": "select * from ref",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# Multi-table unsharded
-"select m1.col from unsharded as m1 join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Multi-table, multi-chunk
-"select music.col from user join music"
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user join music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user join music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# routing rules where table name matches, and there's no alias.
-"select * from second_user.user"
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules where table name matches, and there's an alias.
-"select * from second_user.user as a"
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from second_user.user as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules where table name does not match, and there's no alias.
-"select * from route1"
-{
- "QueryType": "SELECT",
- "Original": "select * from route1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as route1 where 1 != 1",
- "Query": "select * from `user` as route1",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from route1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as route1 where 1 != 1",
- "Query": "select * from `user` as route1",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules where table name does not match, and there's an alias.
-"select * from route1 as a"
-{
- "QueryType": "SELECT",
- "Original": "select * from route1 as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from route1 as a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as a where 1 != 1",
- "Query": "select * from `user` as a",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules with primary targeting
-"select * from primary_redirect"
-{
- "QueryType": "SELECT",
- "Original": "select * from primary_redirect",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
- "Query": "select * from `user` as primary_redirect",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from primary_redirect",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as primary_redirect where 1 != 1",
- "Query": "select * from `user` as primary_redirect",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules bad table
-"select * from bad_table"
-"Unknown database 'noks' in vschema"
-Gen4 plan same as above
-
-# routing rules disabled table
-"select * from disabled"
-"table disabled has been disabled"
-Gen4 plan same as above
-
-"select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42"
-{
- "QueryType": "SELECT",
- "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where 1 != 1",
- "Query": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where foo.col = 42",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select foo.col from `user` as foo, `user` where 1 != 1",
- "Query": "select foo.col from `user` as foo, `user` where foo.col = 42 and foo.id = `user`.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42"
-{
- "QueryType": "SELECT",
- "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "music_id": 1
- },
- "TableName": "music_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.foo, music.id from music where 1 != 1",
- "Query": "select music.foo, music.id from music where music.col = 42",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :music_id",
- "Table": "`user`",
- "Values": [
- ":music_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "music_id": 0
- },
- "TableName": "music_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id, music.foo from music where 1 != 1",
- "Query": "select music.id, music.foo from music where music.col = 42",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :music_id",
- "Table": "`user`",
- "Values": [
- ":music_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ',' join
-"select music.col from user, music"
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user, music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select music.col from user, music",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col from music where 1 != 1",
- "Query": "select music.col from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ',' join unsharded
-"select u1.a, u2.a from unsharded u1, unsharded u2"
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# ',' 3-way join unsharded
-"select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3"
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1",
- "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Left join, single chunk
-"select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a=m2.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b where 1 != 1",
- "Query": "select m1.col from unsharded as m1 left join unsharded as m2 on m1.a = m2.b",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Left join, multi-chunk
-"select u.col from user u left join unsharded m on u.a = m.b"
-{
- "QueryType": "SELECT",
- "Original": "select u.col from user u left join unsharded m on u.a = m.b",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u_a": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.col from `user` as u where 1 != 1",
- "Query": "select u.a, u.col from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m where 1 != 1",
- "Query": "select 1 from unsharded as m where m.b = :u_a",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Three-way left join
-"select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, m2.foo from user left join unsharded as m1 on user.col = m1.col left join unsharded as m2 on m1.col = m2.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "m1_col": 0
- },
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0,L:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 where m1.col = :user_col",
- "Table": "unsharded"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m2.foo from unsharded as m2 where 1 != 1",
- "Query": "select m2.foo from unsharded as m2 where m2.col = :m1_col",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# Three-way left join, right-associated
-"select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user left join user_extra as e left join unsharded as m1 on m1.col = e.col on user.col = e.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinVars": {
- "e_col": 0
- },
- "TableName": "user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.col from user_extra as e where 1 != 1",
- "Query": "select e.col from user_extra as e where e.col = :user_col",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
- "Query": "select 1 from unsharded as m1 where m1.col = :e_col",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Right join
-"select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a=m2.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b where 1 != 1",
- "Query": "select m1.col from unsharded as m1 right join unsharded as m2 on m1.a = m2.b",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Right join with a join LHS
-"select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a=m2.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2 right join unsharded as m3 on m1.a = m2.b",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# Straight-join (Gen4 ignores the straight_join hint)
-"select m1.col from unsharded as m1 straight_join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1",
- "Query": "select m1.col from unsharded as m1 join unsharded as m2",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Three-way join
-"select user.col from user join unsharded as m1 join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
- "Query": "select 1 from unsharded as m1",
- "Table": "unsharded"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m2 where 1 != 1",
- "Query": "select 1 from unsharded as m2",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
- "Query": "select 1 from unsharded as m1, unsharded as m2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Parenthesized, single chunk
-"select user.col from user join (unsharded as m1 join unsharded as m2)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (unsharded as m1 join unsharded as m2) where 1 != 1",
- "Query": "select 1 from (unsharded as m1 join unsharded as m2)",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1, unsharded as m2 where 1 != 1",
- "Query": "select 1 from unsharded as m1, unsharded as m2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Parenthesized, multi-chunk
-"select user.col from user join (user as u1 join unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (user as u1 join unsharded)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
- "Query": "select 1 from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join (user as u1 join unsharded)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u1 where 1 != 1",
- "Query": "select 1 from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# index hints, make sure they are not stripped.
-"select user.col from user use index(a)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple index hints, make sure they are not stripped.
-"select user.col from user use index(a) use index for group by (b)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a) use index for group by (b)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user use index(a) use index for group by (b)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1",
- "Query": "select `user`.col from `user` use index (a) use index for group by (b)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# mergeable sharded join on unique vindex
-"select user.col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex (parenthesized ON clause)
-"select user.col from user join user_extra on (user.id = user_extra.user_id)"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex, with a stray condition
-"select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.col between 1 and 2 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex, swapped operands
-"select user.col from user join user_extra on user_extra.user_id = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user_extra.user_id = user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where user_extra.user_id = `user`.id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable sharded join on unique vindex, and condition
-"select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.id = 5 and `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join on unique vindex, inequality
-"select user.col from user join user_extra on user.id \u003c user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id \u003c user_extra.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where :user_id \u003c user_extra.user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id \u003c user_extra.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where :user_id \u003c user_extra.user_id",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-col reference RHS
-"select user.col from user join user_extra on user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = 5",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-col reference LHS
-"select user.col from user join user_extra on 5 = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on 5 = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on 5 = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-vindex col
-"select user.col from user join user_extra on user.id = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.id = :user_extra_col",
- "Table": "`user`",
- "Values": [
- ":user_extra_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# sharded join, non-unique vindex
-"select user.col from user_extra join user on user_extra.user_id = user.name"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_user_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
- "Query": "select user_extra.user_id from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id",
- "Table": "`user`",
- "Values": [
- ":user_extra_user_id"
- ],
- "Vindex": "name_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user_extra join user on user_extra.user_id = user.name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "user_name": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.`name`, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.`name`, `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.user_id = :user_name",
- "Table": "user_extra",
- "Values": [
- ":user_name"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join with reference table
-"select user.col from user join ref"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join ref where 1 != 1",
- "Query": "select `user`.col from `user` join ref",
- "Table": "`user`, ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, ref where 1 != 1",
- "Query": "select `user`.col from `user`, ref",
- "Table": "`user`, ref"
- },
- "TablesUsed": [
- "user.ref",
- "user.user"
- ]
-}
-
-# reference table self-join
-"select r1.col from ref r1 join ref"
-{
- "QueryType": "SELECT",
- "Original": "select r1.col from ref r1 join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select r1.col from ref as r1 join ref where 1 != 1",
- "Query": "select r1.col from ref as r1 join ref",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select r1.col from ref r1 join ref",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select r1.col from ref as r1, ref where 1 != 1",
- "Query": "select r1.col from ref as r1, ref",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# reference table can merge with other opcodes left to right.
-"select ref.col from ref join user"
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref join `user` where 1 != 1",
- "Query": "select ref.col from ref join `user`",
- "Table": "`user`, ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref, `user` where 1 != 1",
- "Query": "select ref.col from ref, `user`",
- "Table": "`user`, ref"
- },
- "TablesUsed": [
- "user.ref",
- "user.user"
- ]
-}
-
-# reference table can merge with other opcodes left to right and vindex value is in the plan.
-# This tests that route.Merge also copies the condition to the LHS.
-"select ref.col from ref join (select aa from user where user.id=1) user"
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1",
- "Query": "select ref.col from ref join (select aa from `user` where `user`.id = 1) as `user`",
- "Table": "`user`, ref",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ref.col from ref join (select aa from user where user.id=1) user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ref.col from ref, (select aa from `user` where 1 != 1) as `user` where 1 != 1",
- "Query": "select ref.col from ref, (select aa from `user` where `user`.id = 1) as `user`",
- "Table": "`user`, ref",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.ref",
- "user.user"
- ]
-}
-
-# routing rules for join, unsharded route wins if we can't find a merged route
-"select route2.col from route2 join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select route2.col from route2 join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
- "Query": "select route2.col from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select route2.col from route2 join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1",
- "Query": "select route2.col from unsharded as route2",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user_extra"
- ]
-}
-
-# derived table
-"select id from (select id, col from user where id = 5) as t"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from user where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from user where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with join
-"select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t, user_extra where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t, user_extra where t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with join, and aliased references
-"select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t, user_extra where 1 != 1",
- "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t, user_extra where t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with join, duplicate columns
-"select t.id from (select user.id, id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id"
-"duplicate column aliases: id"
-Gen4 error: Duplicate column name 'id'
-
-# derived table in RHS of join
-"select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id",
- "Table": "user_extra, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from user_extra, (select id from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select t.id from user_extra, (select id from `user` where id = 5) as t where t.id = user_extra.user_id",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table in FROM with cross-shard join
-"select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :t_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1",
- "Query": "select t.id from (select id from `user` where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :t_id",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# routing rules for derived table
-"select id from (select id, col from route1 where id = 5) as t"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1 where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1 where id = 5) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table missing columns
-"select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1",
- "Query": "select t.id from (select id from `user`) as t join user_extra on t.id = user_extra.user_id where t.col = 42",
- "Table": "`user`, user_extra"
- }
-}
-Gen4 error: symbol t.col not found
-
-# routing rules for derived table where the constraint is in the outer query
-"select id from (select id, col from route1) as t where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1) as t where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id, col from route1) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id, col from `user` as route1 where id = 5) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# routing rules for derived table where the constraint is in the outer query
-"select id from (select id+col as foo from route1) as t where foo = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id+col as foo from route1) as t where foo = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id + col as foo from `user` as route1 where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id + col as foo from `user` as route1) as t where foo = 5",
- "Table": "`user`"
- }
-}
-Gen4 error: symbol id not found
-
-# push predicate on joined derived tables
-"select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t join (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s on t.id = s.id where 1 != 1",
- "Query": "select t.id from (select id, textcol1 as baz from `user` as route1) as t join (select id, textcol1 + textcol1 as baz from `user`) as s on t.id = s.id where t.baz = '3' and s.baz = '3'",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t, (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s where 1 != 1",
- "Query": "select t.id from (select id, textcol1 as baz from `user` as route1 where textcol1 = '3') as t, (select id, textcol1 + textcol1 as baz from `user` where textcol1 + textcol1 = '3') as s where t.id = s.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# recursive derived table predicate push down
-"select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5"
-{
- "QueryType": "SELECT",
- "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user`) as u) as t where bar = 5",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where colA + colB + 4 = 5) as u) as t",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# recursive derived table lookups
-"select id from (select id from (select id from user) as u) as t where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id from (select id from `user`) as u) as t where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from (select id from user) as u) as t where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1",
- "Query": "select id from (select id from (select id from `user` where id = 5) as u) as t",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# merge derived tables with single-shard routes
-"select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e"
-{
- "QueryType": "SELECT",
- "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1",
- "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u, (select col from user_extra where 1 != 1) as e where 1 != 1",
- "Query": "select u.col, e.col from (select col from `user` where id = 5) as u, (select col from user_extra where user_id = 5) as e",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join of information_schema with normal table
-"select unsharded.foo from information_schema.a join unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from information_schema.a join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "information_schema.a_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from information_schema.a join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "information_schema.a_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# join of normal table with information_schema
-"select unsharded.foo from unsharded join information_schema.a"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from unsharded join information_schema.a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_information_schema.a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.foo from unsharded join information_schema.a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_information_schema.a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.foo from unsharded where 1 != 1",
- "Query": "select unsharded.foo from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from information_schema.a where 1 != 1",
- "Query": "select 1 from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# wire-up on join with cross-shard derived table
-"select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id"
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_col1": 0,
- "t_id": 1
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "t_col1": 0,
- "t_id": 1
- },
- "TableName": "`user`_user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# wire-up on within cross-shard derived table
-"select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t"
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "user_col": 2
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1, `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Join with cross-shard derived table on rhs
-"select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "unsharded_a_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
- "Query": "select 1 from unsharded_a as ua",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "unsharded_a_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1",
- "Query": "select 1 from unsharded_a as ua",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Join with cross-shard derived table on rhs - push down join predicate to derived table
-"select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "ua_id": 0
- },
- "TableName": "unsharded_a_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select ua.id from unsharded_a as ua where 1 != 1",
- "Query": "select ua.id from unsharded_a as ua",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id",
- "Table": "`user`",
- "Values": [
- ":ua_id"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# subquery in ON clause, single route
-"select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1 where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b",
- "user.user"
- ]
-}
-
-# subquery in ON clause as sub-expression
-"select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1 where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b",
- "user.user"
- ]
-}
-
-# IN subquery in ON clause, single route
-"select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1 where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1",
- "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1",
- "Table": "unsharded_a, unsharded_b"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b",
- "user.user"
- ]
-}
-
-# subquery in ON clause, with join primitives
-"select unsharded.col from unsharded join user on user.col in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# subquery in ON clause, with left join primitives
-# The subquery is not pulled all the way out.
-"select unsharded.col from unsharded left join user on user.col in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded left join user on user.col in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# subquery in ON clause, with join primitives, and join on top
-# The subquery is not pulled all the way out.
-"select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`_unsharded_a",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded where 1 != 1",
- "Query": "select unsharded.col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a where 1 != 1",
- "Query": "select 1 from unsharded_a",
- "Table": "unsharded_a"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_unsharded, unsharded_a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col from unsharded, unsharded_a where 1 != 1",
- "Query": "select unsharded.col from unsharded, unsharded_a",
- "Table": "unsharded, unsharded_a"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "user.user"
- ]
-}
-
-# keyspace-qualified queries
-"select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2"
-{
- "QueryType": "SELECT",
- "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_col2": 1
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col1, `user`.col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
- "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "user_col2": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col2, `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col2, `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1",
- "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# implicit table reference for unsharded keyspace
-"select main.foo.col from main.foo"
-{
- "QueryType": "SELECT",
- "Original": "select main.foo.col from main.foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select foo.col from foo where 1 != 1",
- "Query": "select foo.col from foo",
- "Table": "foo"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select main.foo.col from main.foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select foo.col from foo where 1 != 1",
- "Query": "select foo.col from foo",
- "Table": "foo"
- },
- "TablesUsed": [
- "main.foo"
- ]
-}
-
-# col refs should be case-insensitive
-"select user.col from user join user_extra on user.ID = user_extra.User_Id"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1",
- "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1",
- "Query": "select `user`.col from `user`, user_extra where `user`.ID = user_extra.User_Id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with join primitive (FROM)
-"select id, t.id from (select user.id from user join user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select id, t.id from (select user.id from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, t.id from (select user.id from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# database call in ON clause.
-# The on clause is weird because the substitution must even for root expressions.
-"select u1.a from unsharded u1 join unsharded u2 on database()"
-{
- "QueryType": "SELECT",
- "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
- "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.a from unsharded u1 join unsharded u2 on database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1",
- "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# last_insert_id for dual
-"select last_insert_id()"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id()",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__lastInsertId as last_insert_id()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id()",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__lastInsertId as last_insert_id()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# last_insert_id for sharded keyspace
-"select last_insert_id() from user"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# last_insert_id for unsharded route
-"select last_insert_id() from main.unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as `last_insert_id()` from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# join with bindvariables
-"SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.assembly_id = :user_id and user_extra.user_id = 2",
- "Table": "user_extra",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_assembly_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.assembly_id from user_extra where 1 != 1",
- "Query": "select user_extra.assembly_id from user_extra where user_extra.user_id = 2",
- "Table": "user_extra",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user` where `user`.id = :user_extra_assembly_id",
- "Table": "`user`",
- "Values": [
- ":user_extra_assembly_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# verify ',' vs JOIN precedence
-"select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a"
-"symbol u1.a not found"
-Gen4 plan same as above
-
-# first expression fails for ',' join (code coverage: ensure error is returned)
-"select user.foo.col from user.foo, user"
-"table foo not found"
-Gen4 plan same as above
-
-# table names should be case-sensitive
-"select unsharded.id from unsharded where Unsharded.val = 1"
-"symbol Unsharded.val not found"
-Gen4 plan same as above
-
-# implicit table reference for sharded keyspace
-"select user.foo.col from user.foo"
-"table foo not found"
-Gen4 plan same as above
-
-# duplicate symbols
-"select user.id from user join user"
-"Not unique table/alias: 'user'"
-Gen4 plan same as above
-
-# duplicate symbols for merging routes
-"select user.id from user join user_extra user on user.id = user.user_id"
-"Not unique table/alias: 'user'"
-Gen4 plan same as above
-
-# non-existent table
-"select c from t"
-"table t not found"
-Gen4 plan same as above
-
-# non-existent table on left of join
-"select c from t join user"
-"table t not found"
-Gen4 plan same as above
-
-# non-existent table on right of join
-"select c from user join t"
-"table t not found"
-Gen4 plan same as above
-
-# query with parens is planned correctly
-"select m1.col from (unsharded as m1, unsharded as m2)"
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
- "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1",
- "Query": "select m1.col from (unsharded as m1, unsharded as m2)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# gen4 - optimise plan by merging user_extra and music first, and then querying for user info
-"select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "ue_user_id": 1
- },
- "TableName": "`user`_user_extra_music",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1, u.id from `user` as u where 1 != 1",
- "Query": "select 1, u.id from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.user_id from user_extra as ue where 1 != 1",
- "Query": "select ue.user_id from user_extra as ue where ue.id = :u_id",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.user_id = :ue_user_id",
- "Table": "music",
- "Values": [
- ":ue_user_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "music, user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, 1 from user_extra as ue, music as m where 1 != 1",
- "Query": "select ue.id, 1 from user_extra as ue, music as m where m.user_id = ue.user_id",
- "Table": "music, user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u where 1 != 1",
- "Query": "select 1 from `user` as u where u.id = :ue_id",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join column selected as alias
-"SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1",
- "Query": "select ue.id as ueid from user_extra as ue where ue.id = :u_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "ue_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.id, ue.id as ueid from user_extra as ue where 1 != 1",
- "Query": "select ue.id, ue.id as ueid from user_extra as ue",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u where u.id = :ue_id",
- "Table": "`user`",
- "Values": [
- ":ue_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# alias on column from derived table. TODO: to support alias in SimpleProjection engine primitive.
-"select a as k from (select count(*) as a from user) t"
-{
- "QueryType": "SELECT",
- "Original": "select a as k from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a as k from (select count(*) as a from user) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select star from derived table on expandable and unsharded table
-"select u.* from (select * from unsharded) u"
-{
- "QueryType": "SELECT",
- "Original": "select u.* from (select * from unsharded) u",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
- "Query": "select u.* from (select * from unsharded) as u",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.* from (select * from unsharded) u",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1",
- "Query": "select u.* from (select * from unsharded) as u",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# filtering on a cross-shard derived table
-"select id from (select user.id, user.col from user join user_extra) as t where id=5"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select user.id, user.col from user join user_extra) as t where id=5",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col from `user` where `user`.id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# expression on a cross-shard derived table
-"select id+1 from (select user.id, user.col from user join user_extra) as t"
-"unsupported: expression on results of a cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select id+1 from (select user.id, user.col from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, `user`.id + 1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col, `user`.id + 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with aliased columns and outer predicate pushed in derived table
-"select u.a from (select id as b, name from user) u(a, n) where u.n = 1"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
- "Query": "select u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with aliased columns predicate in both the outer and inner
-"select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
- "Query": "select u.a from (select id as b, `name` from `user` where b = 1 and `name` = 1) as u(a, n)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with aliased columns and a join that requires pushProjection
-"select i+1 from (select user.id from user join user_extra) t(i)"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select i+1 from (select user.id from user join user_extra) t(i)",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.id + 1 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.id + 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# two subqueries with different Select and OpCode
-"select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from user_extra where 1 != 1",
- "Query": "select id from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra limit :__upper_limit",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where col = :__sq1 and :__sq_has_values2 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq2"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values2",
- "__sq2"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra limit :__upper_limit",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from user_extra where 1 != 1",
- "Query": "select id from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and col = :__sq2",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# join on int columns
-"select u.id from user as u join user as uu on u.intcol = uu.intcol"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u_intcol": 1
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.intcol from `user` as u where 1 != 1",
- "Query": "select u.id, u.intcol from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as uu where 1 != 1",
- "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u_intcol": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.intcol, u.id from `user` as u where 1 != 1",
- "Query": "select u.intcol, u.id from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as uu where 1 != 1",
- "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Duplicate output column from derived table having a join
-"select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1"
-"unsupported: expression on results of a cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "t_col1": 0
- },
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1, 0 from `user` where 1 != 1",
- "Query": "select `user`.col1, 0 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.a = :t_col1",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# left join where clauses #2
-"select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Filter",
- "Predicate": "coalesce(user_extra.col, 4) = 5",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.col from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# dont merge unsharded tables from different keyspaces
-"select 1 from main.unsharded join main_2.unsharded_tab"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_unsharded_tab",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
- "Query": "select 1 from unsharded_tab",
- "Table": "unsharded_tab"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from main.unsharded join main_2.unsharded_tab",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "unsharded_unsharded_tab",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main_2",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_tab where 1 != 1",
- "Query": "select 1 from unsharded_tab",
- "Table": "unsharded_tab"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main_2.unsharded_tab"
- ]
-}
-
-# Unsharded join with using
-"select * from unsharded_a join unsharded_b using (propertyId);"
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
- "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
- "Table": "unsharded_a, unsharded_b"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded_a join unsharded_b using (propertyId);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1",
- "Query": "select * from unsharded_a join unsharded_b using (propertyId)",
- "Table": "unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-
-# Column aliases in Derived Table
-"select id2 from (select id from user) as x (id2)"
-"unsupported: column aliases in derived table"
-{
- "QueryType": "SELECT",
- "Original": "select id2 from (select id from user) as x (id2)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id2 from (select id from `user` where 1 != 1) as x(id2) where 1 != 1",
- "Query": "select id2 from (select id from `user`) as x(id2)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# single unsharded keyspace with derived table
-"select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
- "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
- "Table": "unsharded, unsharded_b, unsharded_a"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1",
- "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1",
- "Table": "unsharded, unsharded_a, unsharded_b"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "main.unsharded_b"
- ]
-}
-
-# query builder with derived table having join inside it
-"select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra_user_extra",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra_user_extra",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# left join with expressions
-"select user_extra.col+1 from user left join user_extra on user.col = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
- "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# left join with expressions, with three-way join (different code path)
-"select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, user_extra.col+1 from user left join user_extra on user.col = user_extra.col join user_extra e",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra_user_extra",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1",
- "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as e where 1 != 1",
- "Query": "select 1 from user_extra as e",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# left join with expressions coming from both sides
-"select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "LeftJoin",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0,
- "user_foo": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.foo from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.foo from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_foo + user_extra.col + 1 from user_extra where 1 != 1",
- "Query": "select :user_foo + user_extra.col + 1 from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-Gen4 plan same as above
-
-# Do not rewrite derived expressions when the derived table is merged with the outer
-"select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1"
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC",
- "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*)",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)",
- "OrderBy": "(0|2) ASC",
- "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join with USING construct
-"select * from authoritative join unsharded_authoritative using(col1)"
-"unsupported: join with USING(column_list) clause for complex queries"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative join unsharded_authoritative using(col1)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:3,R:0",
- "JoinVars": {
- "authoritative_col1": 0
- },
- "TableName": "authoritative_unsharded_authoritative",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1",
- "Query": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative",
- "Table": "authoritative"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where 1 != 1",
- "Query": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where unsharded_authoritative.col1 = :authoritative_col1",
- "Table": "unsharded_authoritative"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded_authoritative",
- "user.authoritative"
- ]
-}
-
-
-"select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
- "Query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user`) as t1) as t2) as t3 where push_it = 12",
- "Table": "`user`",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1",
- "Query": "select t3.push_it from (select bar as push_it from (select foo as bar from (select id as foo from `user` where id = 12) as t1) as t2) as t3",
- "Table": "`user`",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.json b/go/vt/vtgate/planbuilder/testdata/large_cases.json
new file mode 100644
index 00000000000..b86dae4b168
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/large_cases.json
@@ -0,0 +1,253 @@
+[
+ {
+ "query": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "user_extra_user_id": 0
+ },
+ "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
+ "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id",
+ "Table": "user_extra",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_metadata where 1 != 1",
+ "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id",
+ "Table": "user_metadata",
+ "Values": [
+ ":user_extra_user_id"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "music_id": 0
+ },
+ "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "unsharded_x": 0
+ },
+ "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.x from unsharded where 1 != 1",
+ "Query": "select unsharded.x from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_a where 1 != 1",
+ "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x",
+ "Table": "unsharded_a"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "unsharded_b_unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_b where 1 != 1",
+ "Query": "select 1 from unsharded_b",
+ "Table": "unsharded_b"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "unsharded_auto_music_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded_auto where 1 != 1",
+ "Query": "select 1 from unsharded_auto",
+ "Table": "unsharded_auto"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music_extra where 1 != 1",
+ "Query": "select 1 from music_extra where music_extra.music_id = :music_id",
+ "Table": "music_extra",
+ "Values": [
+ ":music_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "music, music_extra_`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music, music_extra where 1 != 1",
+ "Query": "select 1 from music, music_extra where music.id = music_extra.music_id",
+ "Table": "music, music_extra"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user`, user_extra, user_metadata where 1 != 1",
+ "Query": "select `user`.id from `user`, user_extra, user_metadata where `user`.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id",
+ "Table": "`user`, user_extra, user_metadata"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where 1 != 1",
+ "Query": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where unsharded.x = unsharded_a.y",
+ "Table": "unsharded, unsharded_a, unsharded_auto, unsharded_b"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_a",
+ "main.unsharded_auto",
+ "main.unsharded_b",
+ "user.music",
+ "user.music_extra",
+ "user.user",
+ "user.user_extra",
+ "user.user_metadata"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.txt b/go/vt/vtgate/planbuilder/testdata/large_cases.txt
deleted file mode 100644
index 5fa57013a00..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/large_cases.txt
+++ /dev/null
@@ -1,249 +0,0 @@
-"select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y"
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "user_extra_user_id": 0
- },
- "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
- "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id",
- "Table": "user_extra",
- "Values": [
- ":user_id"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_metadata where 1 != 1",
- "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id",
- "Table": "user_metadata",
- "Values": [
- ":user_extra_user_id"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "music_id": 0
- },
- "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music",
- "Table": "music"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "unsharded_x": 0
- },
- "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.x from unsharded where 1 != 1",
- "Query": "select unsharded.x from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_a where 1 != 1",
- "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x",
- "Table": "unsharded_a"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "unsharded_b_unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_b where 1 != 1",
- "Query": "select 1 from unsharded_b",
- "Table": "unsharded_b"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "unsharded_auto_music_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded_auto where 1 != 1",
- "Query": "select 1 from unsharded_auto",
- "Table": "unsharded_auto"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music_extra where 1 != 1",
- "Query": "select 1 from music_extra where music_extra.music_id = :music_id",
- "Table": "music_extra",
- "Values": [
- ":music_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "music, music_extra_`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music, music_extra where 1 != 1",
- "Query": "select 1 from music, music_extra where music.id = music_extra.music_id",
- "Table": "music, music_extra"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`, user_extra, user_metadata_unsharded, unsharded_a, unsharded_auto, unsharded_b",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user`, user_extra, user_metadata where 1 != 1",
- "Query": "select `user`.id from `user`, user_extra, user_metadata where `user`.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id",
- "Table": "`user`, user_extra, user_metadata"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where 1 != 1",
- "Query": "select 1 from unsharded, unsharded_a, unsharded_b, unsharded_auto where unsharded.x = unsharded_a.y",
- "Table": "unsharded, unsharded_a, unsharded_auto, unsharded_b"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_a",
- "main.unsharded_auto",
- "main.unsharded_b",
- "user.music",
- "user.music_extra",
- "user.user",
- "user.user_extra",
- "user.user_metadata"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json
new file mode 100644
index 00000000000..9120e39bfd6
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json
@@ -0,0 +1,2592 @@
+[
+ {
+ "comment": "this testcase breaks goland, so it lives in its own file",
+ "query": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY 
created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE 
`user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, 
`user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY 
created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE 
`user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, 
`user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270698330)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270699497)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270703806 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270703806)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270714657 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270714657)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270721330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270721330)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270812079 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270812079)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271011532 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271011532)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271034164 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034164)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271034177 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034177)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271066849 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271066849)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271098740 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271098740)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271355000 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271355000)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271924504 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271924504)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272086055 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272086055)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270650576 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270650576)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270652906 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270652906)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270660650 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270660650)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270698330)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270699497)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270649256 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270649256)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270653671 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270653671)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270717223 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270717223)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270720898 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270720898)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271346411 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271346411)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271352121 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271352121)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271354908 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271354908)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271367516 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271367516)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271472522 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271472522)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271821733 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271821733)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272068709 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272068709)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id from music where 1 != 1",
+ "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY 
created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE 
`user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, 
`user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:2)",
+ "(1:3)"
+ ],
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)",
+ "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11)",
+ "Table": "music",
+ "Values": [
+ "INT64(1270698330)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)",
+ "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11)",
+ "Table": "music",
+ "Values": [
+ "INT64(1270699497)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270703806 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270703806)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270714657 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270714657)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270721330 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270721330)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270812079 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270812079)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271011532 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271011532)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271034164 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034164)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271034177 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271034177)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271066849 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271066849)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271098740 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271098740)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271355000 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271355000)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271924504 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271924504)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272086055 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272086055)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270650576 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270650576)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270652906 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270652906)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270660650 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270660650)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270707364 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270707364)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271799956 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271799956)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270637436)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270644941 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270644941)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270649256 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270649256)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270653671 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270653671)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270670201 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270670201)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270717223 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270717223)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270720898 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270720898)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271346411 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271346411)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271352121 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271352121)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271354908 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271354908)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271367516 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271367516)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271472522 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271472522)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271639345)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271821733 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271821733)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271914117)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272068709 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272068709)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272127855 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272127855)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272191137 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272191137)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272468271 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272468271)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1270982590)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271365691)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1271607757)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1",
+ "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272244005 order by created_at asc, id asc limit 11",
+ "Table": "music",
+ "Values": [
+ "INT64(1272244005)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.json b/go/vt/vtgate/planbuilder/testdata/lock_cases.json
new file mode 100644
index 00000000000..5d64c41465d
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/lock_cases.json
@@ -0,0 +1,193 @@
+[
+ {
+ "comment": "get_lock from dual",
+ "query": "select get_lock('xyz', 10) from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10) from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10) from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "is_free_lock from dual",
+ "query": "select is_free_lock('xyz') from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select is_free_lock('xyz') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
+ "lock_func": [
+ "is_free_lock('xyz')"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select is_free_lock('xyz') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
+ "lock_func": [
+ "is_free_lock('xyz')"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "get_lock from dual prepare query",
+ "query": "select get_lock(?, ?)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock(?, ?)",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock(:v1, :v2)"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock(?, ?)",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
+ "lock_func": [
+ "get_lock(:v1, :v2)"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "lock tables read",
+ "query": "lock tables t as x read local",
+ "plan": {
+ "QueryType": "LOCK_TABLES",
+ "Original": "lock tables t as x read local",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "lock tables write",
+ "query": "lock tables t low_priority write",
+ "plan": {
+ "QueryType": "LOCK_TABLES",
+ "Original": "lock tables t low_priority write",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "unlock tables",
+ "query": "unlock tables",
+ "plan": {
+ "QueryType": "UNLOCK_TABLES",
+ "Original": "unlock tables",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "multiple lock functions",
+ "query": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)",
+ "is_free_lock('abc')"
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
+ "Instructions": {
+ "OperatorType": "Lock",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "KeyspaceID(00)",
+ "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
+ "lock_func": [
+ "get_lock('xyz', 10)",
+ "is_free_lock('abc')"
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.txt b/go/vt/vtgate/planbuilder/testdata/lock_cases.txt
deleted file mode 100644
index 765c4c27568..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/lock_cases.txt
+++ /dev/null
@@ -1,186 +0,0 @@
-# get_lock from dual
-"select get_lock('xyz', 10) from dual"
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10) from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10) from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# is_free_lock from dual
-"select is_free_lock('xyz') from dual"
-{
- "QueryType": "SELECT",
- "Original": "select is_free_lock('xyz') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
- "lock_func": [
- "is_free_lock('xyz')"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select is_free_lock('xyz') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1",
- "lock_func": [
- "is_free_lock('xyz')"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# get_lock from dual prepare query
-"select get_lock(?, ?)"
-{
- "QueryType": "SELECT",
- "Original": "select get_lock(?, ?)",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
- "lock_func": [
- "get_lock(:v1, :v2)"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select get_lock(?, ?)",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1",
- "lock_func": [
- "get_lock(:v1, :v2)"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# lock tables read
-"lock tables t as x read local"
-{
- "QueryType": "LOCK_TABLES",
- "Original": "lock tables t as x read local",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# lock tables write
-"lock tables t low_priority write"
-{
- "QueryType": "LOCK_TABLES",
- "Original": "lock tables t low_priority write",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# unlock tables
-"unlock tables"
-{
- "QueryType": "UNLOCK_TABLES",
- "Original": "unlock tables",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# multiple lock functions
-"select get_lock('xyz', 10), is_free_lock('abc') from dual"
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)",
- "is_free_lock('abc')"
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual",
- "Instructions": {
- "OperatorType": "Lock",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "KeyspaceID(00)",
- "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1",
- "lock_func": [
- "get_lock('xyz', 10)",
- "is_free_lock('abc')"
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
new file mode 100644
index 00000000000..466165d6a00
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json
@@ -0,0 +1,1161 @@
+[
+ {
+ "comment": "Test cases in this file follow the code in memory_sort.go.\n# scatter aggregate order by references ungrouped column",
+ "query": "select a, b, count(*) from user group by a order by b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by a order by b",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|4) ASC",
+ "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) from user group by a order by b",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS count(*)",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(1|4) ASC, (0|3) ASC",
+ "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by b asc, a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate order by references aggregate expression",
+ "query": "select a, b, count(*) k from user group by a order by k",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "query": "select a, b, count(*) k from user group by a order by b, a, k",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by b, a, k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "ResultColumns": 5,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|4) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 5,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by b, a, k",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC, (0|3) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with memory sort and limit",
+ "query": "select a, b, count(*) k from user group by a order by k desc limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 DESC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 DESC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with memory sort and order by number",
+ "query": "select a, b, count(*) k from user group by a order by 1,3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by 1,3",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(2) AS count",
+ "GroupBy": "0",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by 1 asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, b, count(*) k from user group by a order by 1,3",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, 2 ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
+ "GroupBy": "(0|3)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter aggregate with memory sort and order by number, reuse weight_string\n# we have to use a meaningless construct to test this. TODO: improve to do ordering once for textcol1",
+ "query": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|2) ASC, 1 ASC, (0|2) ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(1) AS count",
+ "GroupBy": "2",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1, weight_string(textcol1)",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1, weight_string(textcol1) order by textcol1 asc, textcol1 asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 1 ASC, 0 ASC COLLATE latin1_swedish_ci",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS k",
+ "GroupBy": "0 COLLATE latin1_swedish_ci",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select textcol1 as t, count(*) as k from `user` where 1 != 1 group by textcol1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select textcol1 as t, count(*) as k from `user` group by textcol1 order by textcol1 asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by on a cross-shard derived table",
+ "query": "select id from (select user.id, user.col from user join user_extra) as t order by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|2) ASC",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "order by on a cross-shard query. Note: this happens only when an order by column is from the second table",
+ "query": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|3) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,R:1",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|3) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0,R:1",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by for join, with mixed cross-shard ordering",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3",
+ "JoinVars": {
+ "user_id": 4
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0,L:3,R:1,L:4",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
+ "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by for join, on text column in LHS.",
+ "query": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by for join, on text column in RHS.",
+ "query": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
+ "TableName": "unsharded_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
+ "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
+ "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by for vindex func",
+ "query": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "unary expression",
+ "query": "select a from user order by binary a desc",
+ "v3-plan": "unsupported: in scatter query: complex order by expression: convert(a, binary)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a from user order by binary a desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` where 1 != 1",
+ "OrderBy": "(1|2) DESC",
+ "Query": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` order by convert(a, binary) desc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unary expression in join query",
+ "query": "select u.a from user u join music m on u.a = m.a order by binary a desc",
+ "v3-plan": "unsupported: in scatter query: complex order by expression: convert(a, binary)",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.a from user u join music m on u.a = m.a order by binary a desc",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u_a": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u where 1 != 1",
+ "OrderBy": "(1|2) DESC",
+ "Query": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u order by convert(a, binary) desc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music as m where 1 != 1",
+ "Query": "select 1 from music as m where m.a = :u_a",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "intcol order by",
+ "query": "select id, intcol from user order by intcol",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, intcol from user order by intcol",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, intcol from `user` where 1 != 1",
+ "OrderBy": "1 ASC",
+ "Query": "select id, intcol from `user` order by intcol asc",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, intcol from user order by intcol",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, intcol from `user` where 1 != 1",
+ "OrderBy": "1 ASC",
+ "Query": "select id, intcol from `user` order by intcol asc",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter order by with order by column not present",
+ "query": "select col from user order by id",
+ "v3-plan": "unsupported: in scatter query: order by must reference a column in the select list: id asc",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select col, id, weight_string(id) from `user` order by id asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt
deleted file mode 100644
index ec49c4e5c33..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.txt
+++ /dev/null
@@ -1,1144 +0,0 @@
-# Test cases in this file follow the code in memory_sort.go.
-# scatter aggregate order by references ungrouped column
-"select a, b, count(*) from user group by a order by b"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by a order by b",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|4) ASC",
- "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) from user group by a order by b",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS count(*)",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(1|4) ASC, (0|3) ASC",
- "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by b asc, a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate order by references aggregate expression
-"select a, b, count(*) k from user group by a order by k"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-"select a, b, count(*) k from user group by a order by b, a, k"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by b, a, k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "ResultColumns": 5,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|4) ASC",
- "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 5,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by b, a, k",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 ASC, (0|3) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with memory sort and limit
-"select a, b, count(*) k from user group by a order by k desc limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 DESC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by k desc limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 DESC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with memory sort and order by number
-"select a, b, count(*) k from user group by a order by 1,3"
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by 1,3",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(2) AS count",
- "GroupBy": "0",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by 1 asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, b, count(*) k from user group by a order by 1,3",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, 2 ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(1) AS b, sum_count_star(2) AS k",
- "GroupBy": "(0|3)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)",
- "OrderBy": "(0|3) ASC",
- "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter aggregate with memory sort and order by number, reuse weight_string
-# we have to use a meaningless construct to test this. TODO: improve to do ordering once for textcol1
-"select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1"
-{
- "QueryType": "SELECT",
- "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|2) ASC, 1 ASC, (0|2) ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(1) AS count",
- "GroupBy": "2",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1, weight_string(textcol1)",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1, weight_string(textcol1) order by textcol1 asc, textcol1 asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 1 ASC, 0 ASC COLLATE latin1_swedish_ci",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS k",
- "GroupBy": "0 COLLATE latin1_swedish_ci",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select textcol1 as t, count(*) as k from `user` where 1 != 1 group by textcol1",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
- "Query": "select textcol1 as t, count(*) as k from `user` group by textcol1 order by textcol1 asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by on a cross-shard derived table
-"select id from (select user.id, user.col from user join user_extra) as t order by id"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|2) ASC",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# order by on a cross-shard query. Note: this happens only when an order by column is from the second table
-"select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|3) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,R:1",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|3) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0,R:1",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Order by for join, with mixed cross-shard ordering
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3",
- "JoinVars": {
- "user_id": 4
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0,L:3,R:1,L:4",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1",
- "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Order by for join, on text column in LHS.
-"select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2"
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Order by for join, on text column in RHS.
-"select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2"
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1",
- "TableName": "unsharded_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1",
- "Query": "select un.col2, weight_string(un.col2) from unsharded as un",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1",
- "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# order by for vindex func
-"select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start"
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 ASC",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user_index"
- ]
-}
-
-# unary expression
-"select a from user order by binary a desc"
-"unsupported: in scatter query: complex order by expression: convert(a, binary)"
-{
- "QueryType": "SELECT",
- "Original": "select a from user order by binary a desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` where 1 != 1",
- "OrderBy": "(1|2) DESC",
- "Query": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` order by convert(a, binary) desc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# unary expression in join query
-"select u.a from user u join music m on u.a = m.a order by binary a desc"
-"unsupported: in scatter query: complex order by expression: convert(a, binary)"
-{
- "QueryType": "SELECT",
- "Original": "select u.a from user u join music m on u.a = m.a order by binary a desc",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u_a": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u where 1 != 1",
- "OrderBy": "(1|2) DESC",
- "Query": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u order by convert(a, binary) desc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music as m where 1 != 1",
- "Query": "select 1 from music as m where m.a = :u_a",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# intcol order by
-"select id, intcol from user order by intcol"
-{
- "QueryType": "SELECT",
- "Original": "select id, intcol from user order by intcol",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, intcol from `user` where 1 != 1",
- "OrderBy": "1 ASC",
- "Query": "select id, intcol from `user` order by intcol asc",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, intcol from user order by intcol",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, intcol from `user` where 1 != 1",
- "OrderBy": "1 ASC",
- "Query": "select id, intcol from `user` order by intcol asc",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter order by with order by column not present
-"select col from user order by id"
-"unsupported: in scatter query: order by must reference a column in the select list: id asc"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select col, id, weight_string(id) from `user` order by id asc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/migration_cases.json b/go/vt/vtgate/planbuilder/testdata/migration_cases.json
new file mode 100644
index 00000000000..9978120bf6b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/migration_cases.json
@@ -0,0 +1,103 @@
+[
+ {
+ "comment": "revert migration",
+ "query": "revert vitess_migration 'abc'",
+ "plan": {
+ "QueryType": "REVERT",
+ "Original": "revert vitess_migration 'abc'",
+ "Instructions": {
+ "OperatorType": "RevertMigration",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "query": "revert vitess_migration 'abc'"
+ }
+ }
+ },
+ {
+ "comment": "retry migration",
+ "query": "alter vitess_migration 'abc' retry",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' retry",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' retry"
+ }
+ }
+ },
+ {
+ "comment": "complete migration",
+ "query": "alter vitess_migration 'abc' complete",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' complete",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' complete"
+ }
+ }
+ },
+ {
+    "comment": "cleanup migration",
+ "query": "alter vitess_migration 'abc' cleanup",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' cleanup",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' cleanup"
+ }
+ }
+ },
+ {
+ "comment": "cancel migration",
+ "query": "alter vitess_migration 'abc' cancel",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration 'abc' cancel",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration 'abc' cancel"
+ }
+ }
+ },
+ {
+ "comment": "cancel all migrations",
+ "query": "alter vitess_migration cancel all",
+ "plan": {
+ "QueryType": "UNKNOWN",
+ "Original": "alter vitess_migration cancel all",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "alter vitess_migration cancel all"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/migration_cases.txt b/go/vt/vtgate/planbuilder/testdata/migration_cases.txt
deleted file mode 100644
index fc08cfe7d07..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/migration_cases.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-# revert migration
-"revert vitess_migration 'abc'"
-{
- "QueryType": "REVERT",
- "Original": "revert vitess_migration 'abc'",
- "Instructions": {
- "OperatorType": "RevertMigration",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "query": "revert vitess_migration 'abc'"
- }
-}
-Gen4 plan same as above
-
-# retry migration
-"alter vitess_migration 'abc' retry"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' retry",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' retry"
- }
-}
-Gen4 plan same as above
-
-# complete migration
-"alter vitess_migration 'abc' complete"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' complete",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' complete"
- }
-}
-Gen4 plan same as above
-
-# complete migration
-"alter vitess_migration 'abc' cleanup"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' cleanup",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' cleanup"
- }
-}
-Gen4 plan same as above
-
-# cancel migration
-"alter vitess_migration 'abc' cancel"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration 'abc' cancel",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration 'abc' cancel"
- }
-}
-Gen4 plan same as above
-
-# cancel all migrations
-"alter vitess_migration cancel all"
-{
- "QueryType": "UNKNOWN",
- "Original": "alter vitess_migration cancel all",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "alter vitess_migration cancel all"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json
new file mode 100644
index 00000000000..88717292379
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json
@@ -0,0 +1,407 @@
+[
+ {
+ "comment": "OLTP simple select",
+ "query": "SELECT c FROM sbtest34 WHERE id=15",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest34 WHERE id=15",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest34 where 1 != 1",
+ "Query": "select c from sbtest34 where id = 15",
+ "Table": "sbtest34",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest34 WHERE id=15",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest34 where 1 != 1",
+ "Query": "select c from sbtest34 where id = 15",
+ "Table": "sbtest34",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest34"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP simple range select",
+ "query": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest12 where 1 != 1",
+ "Query": "select c from sbtest12 where id between 1 and 10",
+ "Table": "sbtest12"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest12 where 1 != 1",
+ "Query": "select c from sbtest12 where id between 1 and 10",
+ "Table": "sbtest12"
+ },
+ "TablesUsed": [
+ "main.sbtest12"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP sum range select",
+ "query": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
+ "Query": "select sum(k) from sbtest43 where id between 90 and 990",
+ "Table": "sbtest43"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS sum(k)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
+ "Query": "select sum(k) from sbtest43 where id between 90 and 990",
+ "Table": "sbtest43"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.sbtest43"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP order range select",
+ "query": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c, weight_string(c) from sbtest1 where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select c, weight_string(c) from sbtest1 where id between 50 and 235 order by c asc",
+ "ResultColumns": 1,
+ "Table": "sbtest1"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c from sbtest1 where 1 != 1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select c from sbtest1 where id between 50 and 235 order by c asc",
+ "Table": "sbtest1"
+ },
+ "TablesUsed": [
+ "main.sbtest1"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP distinct range select",
+ "query": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "1",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc",
+ "ResultColumns": 2,
+ "Table": "sbtest30"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1) COLLATE latin1_swedish_ci",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
+ "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 0 ASC COLLATE latin1_swedish_ci",
+ "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc, c asc",
+ "Table": "sbtest30"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.sbtest30"
+ ]
+ }
+ },
+ {
+    "comment": "OLTP index update",
+ "query": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest6 set k = k + 1 where id = 5",
+ "Table": "sbtest6",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest6"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest6 set k = k + 1 where id = 5",
+ "Table": "sbtest6",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest6"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP non index update",
+ "query": "UPDATE sbtest9 SET c=7 WHERE id=8",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest9 set c = 7 where id = 8",
+ "Table": "sbtest9",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest9"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update sbtest9 set c = 7 where id = 8",
+ "Table": "sbtest9",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest9"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP delete",
+ "query": "DELETE FROM sbtest15 WHERE id=7525",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM sbtest15 WHERE id=7525",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from sbtest15 where id = 7525",
+ "Table": "sbtest15",
+ "Values": [
+ "INT64(7525)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest15"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM sbtest15 WHERE id=7525",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from sbtest15 where id = 7525",
+ "Table": "sbtest15",
+ "Values": [
+ "INT64(7525)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.sbtest15"
+ ]
+ }
+ },
+ {
+ "comment": "OLTP insert",
+ "query": "INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into sbtest16(id, k, c, pad) values (:_id_0, 1, 2, 50)",
+ "TableName": "sbtest16",
+ "VindexValues": {
+ "hash": "INT64(42)"
+ }
+ },
+ "TablesUsed": [
+ "main.sbtest16"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.txt b/go/vt/vtgate/planbuilder/testdata/oltp_cases.txt
deleted file mode 100644
index 42aefb3fd4d..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/oltp_cases.txt
+++ /dev/null
@@ -1,396 +0,0 @@
-# OLTP simple select
-"SELECT c FROM sbtest34 WHERE id=15"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest34 WHERE id=15",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest34 where 1 != 1",
- "Query": "select c from sbtest34 where id = 15",
- "Table": "sbtest34",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest34 WHERE id=15",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest34 where 1 != 1",
- "Query": "select c from sbtest34 where id = 15",
- "Table": "sbtest34",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest34"
- ]
-}
-
-# OLTP simple range select
-"SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest12 where 1 != 1",
- "Query": "select c from sbtest12 where id between 1 and 10",
- "Table": "sbtest12"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest12 where 1 != 1",
- "Query": "select c from sbtest12 where id between 1 and 10",
- "Table": "sbtest12"
- },
- "TablesUsed": [
- "main.sbtest12"
- ]
-}
-
-# OLTP sum range select
-"SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990"
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
- "Query": "select sum(k) from sbtest43 where id between 90 and 990",
- "Table": "sbtest43"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS sum(k)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(k) from sbtest43 where 1 != 1",
- "Query": "select sum(k) from sbtest43 where id between 90 and 990",
- "Table": "sbtest43"
- }
- ]
- },
- "TablesUsed": [
- "main.sbtest43"
- ]
-}
-
-# OLTP order range select
-"SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c, weight_string(c) from sbtest1 where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select c, weight_string(c) from sbtest1 where id between 50 and 235 order by c asc",
- "ResultColumns": 1,
- "Table": "sbtest1"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c from sbtest1 where 1 != 1",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci",
- "Query": "select c from sbtest1 where id between 50 and 235 order by c asc",
- "Table": "sbtest1"
- },
- "TablesUsed": [
- "main.sbtest1"
- ]
-}
-
-# OLTP distinct range select
-"SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c"
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "1",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc",
- "ResultColumns": 2,
- "Table": "sbtest30"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|1) COLLATE latin1_swedish_ci",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1",
- "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 0 ASC COLLATE latin1_swedish_ci",
- "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc, c asc",
- "Table": "sbtest30"
- }
- ]
- },
- "TablesUsed": [
- "main.sbtest30"
- ]
-}
-
-# OLTP index udpate
-"UPDATE sbtest6 SET k=k+1 WHERE id=5"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest6 set k = k + 1 where id = 5",
- "Table": "sbtest6",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest6"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest6 set k = k + 1 where id = 5",
- "Table": "sbtest6",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest6"
- ]
-}
-
-# OLTP non index update
-"UPDATE sbtest9 SET c=7 WHERE id=8"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest9 set c = 7 where id = 8",
- "Table": "sbtest9",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest9"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE sbtest9 SET c=7 WHERE id=8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update sbtest9 set c = 7 where id = 8",
- "Table": "sbtest9",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest9"
- ]
-}
-
-# OLTP delete
-"DELETE FROM sbtest15 WHERE id=7525"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM sbtest15 WHERE id=7525",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from sbtest15 where id = 7525",
- "Table": "sbtest15",
- "Values": [
- "INT64(7525)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest15"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM sbtest15 WHERE id=7525",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from sbtest15 where id = 7525",
- "Table": "sbtest15",
- "Values": [
- "INT64(7525)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.sbtest15"
- ]
-}
-
-# OLTP insert
-"INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO sbtest16 (id, k, c, pad) VALUES (42, 1, 2, 50)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into sbtest16(id, k, c, pad) values (:_id_0, 1, 2, 50)",
- "TableName": "sbtest16",
- "VindexValues": {
- "hash": "INT64(42)"
- }
- },
- "TablesUsed": [
- "main.sbtest16"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/onecase.json b/go/vt/vtgate/planbuilder/testdata/onecase.json
new file mode 100644
index 00000000000..da7543f706a
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/onecase.json
@@ -0,0 +1,9 @@
+[
+ {
+ "comment": "Add your test case here for debugging and run go test -run=One.",
+ "query": "",
+ "plan": {
+
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/onecase.txt b/go/vt/vtgate/planbuilder/testdata/onecase.txt
deleted file mode 100644
index e819513f354..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/onecase.txt
+++ /dev/null
@@ -1 +0,0 @@
-# Add your test case here for debugging and run go test -run=One.
diff --git a/go/vt/vtgate/planbuilder/testdata/other_admin_cases.json b/go/vt/vtgate/planbuilder/testdata/other_admin_cases.json
new file mode 100644
index 00000000000..2eb3432e1b7
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/other_admin_cases.json
@@ -0,0 +1,56 @@
+[
+ {
+ "comment": "Repair statement",
+ "query": "repair table t1,t2 quick",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "repair table t1,t2 quick",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "repair table t1,t2 quick",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Optimize statement",
+ "query": "optimize table t1",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "optimize table t1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "optimize table t1",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "DO statement",
+ "query": "DO 1",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "DO 1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "DO 1",
+ "SingleShardOnly": true
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/other_admin_cases.txt b/go/vt/vtgate/planbuilder/testdata/other_admin_cases.txt
deleted file mode 100644
index e5f965ee1b6..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/other_admin_cases.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-# Repair statement
-"repair table t1,t2 quick"
-{
- "QueryType": "OTHER",
- "Original": "repair table t1,t2 quick",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "repair table t1,t2 quick",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Optimize statement
-"optimize table t1"
-{
- "QueryType": "OTHER",
- "Original": "optimize table t1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "optimize table t1",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# DO statement
-"DO 1"
-{
- "QueryType": "OTHER",
- "Original": "DO 1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "DO 1",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
new file mode 100644
index 00000000000..1f19139b5e7
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json
@@ -0,0 +1,131 @@
+[
+ {
+ "comment": "Explain statement",
+ "query": "explain select * from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain select * from user",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain select * from `user`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Explain Vitess statement",
+ "query": "explain format=vitess select * from user",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain format=vitess select * from user",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "Analyze statement",
+ "query": "analyze table t1",
+ "plan": {
+ "QueryType": "OTHER",
+ "Original": "analyze table t1",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "analyze table t1",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Describe statement",
+ "query": "describe select * from t",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "describe select * from t",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain select * from t",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Desc statement",
+ "query": "desc select * from t",
+ "plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "desc select * from t",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "explain select * from t",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "query": "explain format=vtexplain select * from user",
+ "v3-plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain format=vtexplain select * from user",
+ "Instructions": {
+ "OperatorType": "VTEXPLAIN",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "EXPLAIN",
+ "Original": "explain format=vtexplain select * from user",
+ "Instructions": {
+ "OperatorType": "VTEXPLAIN",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt b/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt
deleted file mode 100644
index 0866d9df34b..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/other_read_cases.txt
+++ /dev/null
@@ -1,127 +0,0 @@
-# Explain statement
-"explain select * from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain select * from user",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "explain select * from `user`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Explain Vitess statement
-"explain format=vitess select * from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain format=vitess select * from user",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# Analyze statement
-"analyze table t1"
-{
- "QueryType": "OTHER",
- "Original": "analyze table t1",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "analyze table t1",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Describe statement
-"describe select * from t"
-{
- "QueryType": "EXPLAIN",
- "Original": "describe select * from t",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "explain select * from t",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Desc statement
-"desc select * from t"
-{
- "QueryType": "EXPLAIN",
- "Original": "desc select * from t",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "explain select * from t",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-"explain format=vtexplain select * from user"
-{
- "QueryType": "EXPLAIN",
- "Original": "explain format=vtexplain select * from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "EXPLAIN",
- "Original": "explain format=vtexplain select * from user",
- "Instructions": {
- "OperatorType": "VTEXPLAIN",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "main.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
new file mode 100644
index 00000000000..8cb508049ef
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json
@@ -0,0 +1,3117 @@
+[
+ {
+ "comment": "HAVING implicitly references table col",
+ "query": "select user.col1 from user having col2 = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 from user having col2 = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user` having col2 = 2",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 from user having col2 = 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user` where col2 = 2",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous symbol reference",
+ "query": "select user.col1, user_extra.col1 from user join user_extra having col1 = 2",
+ "v3-plan": "ambiguous symbol reference: col1",
+ "gen4-plan": "Column 'col1' in field list is ambiguous"
+ },
+ {
+ "comment": "TODO: this should be 'Column 'col1' in having clause is ambiguous'\n# non-ambiguous symbol reference",
+ "query": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col1 from user_extra having user_extra.col1 = 2",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
+ "Query": "select `user`.col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
+ "Query": "select user_extra.col1 from user_extra where user_extra.col1 = 2",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING multi-route",
+ "query": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
+ "Query": "select user_extra.col3 from user_extra having user_extra.col3 = 1",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2 from `user` where `user`.col1 = 1 and `user`.col1 = `user`.col2 and 1 = 1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
+ "Query": "select user_extra.col3 from user_extra where user_extra.col3 = 1 and 1 = 1",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "HAVING uses subquery",
+ "query": "select id from user having id in (select col from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user having id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user having id in (select col from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY, reference col from local table.",
+ "query": "select col from user where id = 5 order by aa",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 5 order by aa",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 5 order by aa asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 5 order by aa",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 5 order by aa asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY uses column numbers",
+ "query": "select col from user where id = 1 order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 order by 1 asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where id = 1 order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where id = 1 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter",
+ "query": "select col from user order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` order by col asc",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` order by col asc",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on select t.*",
+ "query": "select t.*, t.col from user t order by t.col",
+ "v3-plan": "unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY on select *",
+ "query": "select *, col from user order by col",
+ "v3-plan": "unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY on select multi t.*",
+ "query": "select t.*, t.name, t.*, t.col from user t order by t.col",
+ "v3-plan": "unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY on select multi *",
+ "query": "select *, name, *, col from user order by col",
+ "v3-plan": "unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list",
+ "gen4-plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "ORDER BY works for select * from authoritative table",
+ "query": "select * from authoritative order by user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
+ "ResultColumns": 3,
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
+ "OrderBy": "(0|3) ASC",
+ "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
+ "ResultColumns": 3,
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY works for select * from authoritative table",
+ "query": "select * from authoritative order by col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc",
+ "ResultColumns": 3,
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative order by col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
+ "OrderBy": "1 ASC COLLATE latin1_swedish_ci",
+ "Query": "select user_id, col1, col2 from authoritative order by col1 asc",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter with text column",
+ "query": "select a, textcol1, b from user order by a, textcol1, b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
+ "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "Query": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter with text column, qualified name TODO: can plan better",
+ "query": "select a, user.textcol1, b from user order by a, textcol1, b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
+ "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
+ "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
+ "ResultColumns": 3,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY on scatter with multiple text columns",
+ "query": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1",
+ "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC",
+ "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` where 1 != 1",
+ "OrderBy": "(0|4) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|5) ASC, 3 ASC COLLATE latin1_swedish_ci",
+ "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
+ "ResultColumns": 4,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY invalid col number on scatter",
+ "query": "select col from user order by 2",
+ "plan": "Unknown column '2' in 'order clause'"
+ },
+ {
+ "comment": "ORDER BY column offset",
+ "query": "select id as foo from music order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by 1 asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by foo asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY NULL",
+ "query": "select col from user order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by null",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by null",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by null",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY after pull-out subquery",
+ "query": "select col from user where col in (select col2 from user) order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by col",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by col",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "OrderBy": "0 ASC",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY NULL for join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id order by null",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY non-key column for join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY non-key column for implicit join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY NULL after pull-out subquery",
+ "query": "select col from user where col in (select col2 from user) order by null",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by null",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by null",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY RAND()",
+ "query": "select col from user order by RAND()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by RAND()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by RAND()",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user order by RAND()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` order by RAND()",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY RAND() for join",
+ "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_id": 2
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by RAND()",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1 order by RAND()",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.col3 from music where 1 != 1",
+ "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
+ "Table": "music",
+ "Values": [
+ ":user_id"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ORDER BY RAND() after pull-out subquery",
+ "query": "select col from user where col in (select col2 from user) order by rand()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by rand()",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col2 from user) order by rand()",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col2 from `user` where 1 != 1",
+ "Query": "select col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, '*' expression",
+ "query": "select * from user where id = 5 order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, qualified '*' expression",
+ "query": "select user.* from user where id = 5 order by user.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, '*' expression with qualified reference",
+ "query": "select * from user where id = 5 order by user.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, '*' expression in a subquery",
+ "query": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_col": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
+ "Query": "select u.id, u.col from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.col = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
+ "Query": "select u.col, u.id from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.col = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, verify outer symtab is searched according to its own context.",
+ "query": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)",
+ "v3-plan": "symbol u.col not found in subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id from `user` as u where 1 != 1",
+ "Query": "select u.id from `user` as u where u.id in (select col2 from `user` where `user`.id = u.id order by u.col asc)",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by, qualified '*' expression, name mismatched.",
+ "query": "select user.* from user where id = 5 order by e.col",
+ "plan": "symbol e.col not found"
+ },
+ {
+ "comment": "Order by, invalid column number",
+ "query": "select col from user order by 18446744073709551616",
+ "plan": "error parsing column number: 18446744073709551616"
+ },
+ {
+ "comment": "Order by, out of range column number",
+ "query": "select col from user order by 2",
+ "plan": "Unknown column '2' in 'order clause'"
+ },
+ {
+ "comment": "Order by, '*' expression with qualified reference and using collate",
+ "query": "select * from user where id = 5 order by user.col collate utf8_general_ci",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by with math functions",
+ "query": "select * from user where id = 5 order by -col1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by -col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by -col1 asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by -col1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by -col1 asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by with string operations",
+ "query": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by with math operations",
+ "query": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by derived table column",
+ "query": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1",
+ "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` as u, (select user_id from user_extra where 1 != 1) as eu where 1 != 1",
+ "Query": "select * from `user` as u, (select user_id from user_extra where user_id = 5) as eu where u.id = 5 and u.id = eu.user_id order by eu.user_id asc",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: order by gets pushed for routes",
+ "query": "select col from route1 where id = 1 order by col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from route1 where id = 1 order by col",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` as route1 where 1 != 1",
+ "Query": "select col from `user` as route1 where id = 1 order by col asc",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "LIMIT",
+ "query": "select col1 from user where id = 1 limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1 from user where id = 1 limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user` where id = 1 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1 from user where id = 1 limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user` where id = 1 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "limit for joins. Can't push down the limit because result\n# counts get multiplied by join operations.",
+ "query": "select user.col from user join user_extra limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "limit for scatter",
+ "query": "select col from user limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "limit for scatter with bind var",
+ "query": "select col from user limit :a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit :a",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": ":a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user limit :a",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": ":a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "cross-shard expression in parenthesis with limit",
+ "query": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "scatter limit after pullout subquery",
+ "query": "select col from user where col in (select col1 from user) limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col1 from user) limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where col in (select col1 from user) limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1 from `user` where 1 != 1",
+ "Query": "select col1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "limit on reference table",
+ "query": "select col from ref limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref limit 1",
+ "Table": "ref"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from ref limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from ref where 1 != 1",
+ "Query": "select col from ref limit 1",
+ "Table": "ref"
+ },
+ "TablesUsed": [
+ "user.ref"
+ ]
+ }
+ },
+ {
+ "comment": "arithmetic limit",
+ "query": "select id from user limit 1+1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user limit 1+1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user limit 1+1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by column alias",
+ "query": "select id as foo from music order by foo",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by foo asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo from music order by foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id as foo, weight_string(id) from music order by foo asc",
+ "ResultColumns": 1,
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "column alias for a table column in order by",
+ "query": "select id as foo, id2 as id from music order by id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo, id2 as id from music order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
+ "ResultColumns": 2,
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id as foo, id2 as id from music order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
+ "ResultColumns": 2,
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "ordering on the left side of the join",
+ "query": "select name from user, music order by name",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select name from user, music order by name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select name from user, music order by name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column without group by",
+ "query": "select count(id), num from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num from `user` where 1 != 1",
+ "Query": "select count(id), num from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num from `user` where 1 != 1",
+ "Query": "select count(id), num from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column with order by",
+ "query": "select count(id), num from user order by 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user order by 2",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(1|2) ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
+ "Query": "select count(id), num, weight_string(num) from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user order by 2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` order by num asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column with group by",
+ "query": "select count(id), num from user group by 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count",
+ "GroupBy": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count(id)",
+ "GroupBy": "(1|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "aggregation and non-aggregations column with group by and order by",
+ "query": "select count(id), num from user group by 2 order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2 order by 1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count",
+ "GroupBy": "1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(id), num from user group by 2 order by 1",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count(0) AS count(id)",
+ "GroupBy": "(1|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join order by with ambiguous column reference ; valid in MySQL",
+ "query": "select name, name from user, music order by name",
+ "v3-plan": "ambiguous symbol reference: `name`",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select name, name from user, music order by name",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:0",
+ "TableName": "`user`_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "order by with ambiguous column reference ; valid in MySQL",
+ "query": "select id, id from user order by id",
+ "v3-plan": "ambiguous symbol reference: id",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, id from user order by id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select id, id, weight_string(id) from `user` order by id asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Scatter order by and aggregation: order by column must reference column from select list",
+ "query": "select col, count(*) from user group by col order by c1",
+ "v3-plan": "unsupported: memory sort: order by must reference a column in the select list: c1 asc",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col, count(*) from user group by col order by c1",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count(*), random(2) AS c1",
+ "GroupBy": "0",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col, count(*), c1, weight_string(c1) from `user` where 1 != 1 group by col",
+ "OrderBy": "(2|3) ASC, 0 ASC",
+ "Query": "select col, count(*), c1, weight_string(c1) from `user` group by col order by c1 asc, col asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Distinct with cross shard query",
+ "query": "select distinct user.a from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.a from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a from `user` where 1 != 1",
+ "Query": "select `user`.a from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct user.a from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|1)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select `user`.a, weight_string(`user`.a) from `user` order by `user`.a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Distinct with column alias",
+ "query": "select distinct a as c, a from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a as c, a from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0, 1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a as c, a from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), (1|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Distinct with same column",
+ "query": "select distinct a, a from user",
+ "v3-plan": "generating order by clause: ambiguous symbol reference: a",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select distinct a, a from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "(0|2), (1|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC, (0|2) ASC",
+ "Query": "select distinct a, a, weight_string(a) from `user` order by a asc, a asc",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Order by has subqueries",
+ "query": "select id from unsharded order by (select id from unsharded)",
+ "v3-plan": "unsupported: subqueries disallowed in GROUP or ORDER BY",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from unsharded order by (select id from unsharded)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded order by (select id from unsharded) asc",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Equal filter with hexadecimal value",
+ "query": "select count(*) a from user having a = 0x01",
+ "v3-plan": "unsupported: filtering on results of aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) a from user having a = 0x01",
+ "Instructions": {
+ "OperatorType": "Filter",
+ "Predicate": ":0 = 0x01",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS a",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as a from `user` where 1 != 1",
+ "Query": "select count(*) as a from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt
deleted file mode 100644
index 9e96746f5b6..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.txt
+++ /dev/null
@@ -1,3055 +0,0 @@
-# HAVING implicitly references table col
-"select user.col1 from user having col2 = 2"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 from user having col2 = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user` having col2 = 2",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 from user having col2 = 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user` where col2 = 2",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ambiguous symbol reference
-"select user.col1, user_extra.col1 from user join user_extra having col1 = 2"
-"ambiguous symbol reference: col1"
-Gen4 error: Column 'col1' in field list is ambiguous
-
-# TODO: this should be 'Column 'col1' in having clause is ambiguous'
-# non-ambiguous symbol reference
-"select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
- "Query": "select user_extra.col1 from user_extra having user_extra.col1 = 2",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 from `user` where 1 != 1",
- "Query": "select `user`.col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1",
- "Query": "select user_extra.col1 from user_extra where user_extra.col1 = 2",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# HAVING multi-route
-"select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
- "Query": "select user_extra.col3 from user_extra having user_extra.col3 = 1",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2 from `user` where `user`.col1 = 1 and `user`.col1 = `user`.col2 and 1 = 1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1",
- "Query": "select user_extra.col3 from user_extra where user_extra.col3 = 1 and 1 = 1",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# HAVING uses subquery
-"select id from user having id in (select col from user)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user having id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user having id in (select col from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY, reference col from local table.
-"select col from user where id = 5 order by aa"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 5 order by aa",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 5 order by aa asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 5 order by aa",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 5 order by aa asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY uses column numbers
-"select col from user where id = 1 order by 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 order by 1 asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where id = 1 order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where id = 1 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on scatter
-"select col from user order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` order by col asc",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` order by col asc",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on select t.*
-"select t.*, t.col from user t order by t.col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY on select *
-"select *, col from user order by col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY on select multi t.*
-"select t.*, t.name, t.*, t.col from user t order by t.col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY on select multi *
-"select *, name, *, col from user order by col"
-"unsupported: in scatter query, can't order by a column that comes after `*` expressions in the SELECT list"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# ORDER BY works for select * from authoritative table
-"select * from authoritative order by user_id"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
- "OrderBy": "(0|3) ASC",
- "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
- "ResultColumns": 3,
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1",
- "OrderBy": "(0|3) ASC",
- "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc",
- "ResultColumns": 3,
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# ORDER BY works for select * from authoritative table
-"select * from authoritative order by col1"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc",
- "ResultColumns": 3,
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative order by col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
- "OrderBy": "1 ASC COLLATE latin1_swedish_ci",
- "Query": "select user_id, col1, col2 from authoritative order by col1 asc",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# ORDER BY on scatter with text column
-"select a, textcol1, b from user order by a, textcol1, b"
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
- "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "Query": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on scatter with text column, qualified name TODO: can plan better
-"select a, user.textcol1, b from user order by a, textcol1, b"
-{
- "QueryType": "SELECT",
- "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
- "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, user.textcol1, b from user order by a, textcol1, b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC",
- "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc",
- "ResultColumns": 3,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY on scatter with multiple text columns
-"select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2"
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1",
- "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC",
- "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
- "ResultColumns": 4,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` where 1 != 1",
- "OrderBy": "(0|4) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|5) ASC, 3 ASC COLLATE latin1_swedish_ci",
- "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc",
- "ResultColumns": 4,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY invalid col number on scatter
-"select col from user order by 2"
-"Unknown column '2' in 'order clause'"
-Gen4 plan same as above
-
-# ORDER BY column offset
-"select id as foo from music order by 1"
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by 1 asc",
- "ResultColumns": 1,
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by foo asc",
- "ResultColumns": 1,
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# ORDER BY NULL
-"select col from user order by null"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by null",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by null",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by null",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY after pull-out subquery
-"select col from user where col in (select col2 from user) order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by col",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by col",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "OrderBy": "0 ASC",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY NULL for join
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id order by null",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY non-key column for join
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY non-key column for implicit join
-"select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1",
- "OrderBy": "(1|3) ASC",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY NULL after pull-out subquery
-"select col from user where col in (select col2 from user) order by null"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by null",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by null",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY RAND()
-"select col from user order by RAND()"
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by RAND()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by RAND()",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user order by RAND()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` order by RAND()",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ORDER BY RAND() for join
-"select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()"
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_id": 2
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by RAND()",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1 order by RAND()",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.col3 from music where 1 != 1",
- "Query": "select music.col3 from music where music.id = :user_id order by RAND()",
- "Table": "music",
- "Values": [
- ":user_id"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# ORDER BY RAND() after pull-out subquery
-"select col from user where col in (select col2 from user) order by rand()"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by rand()",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col2 from user) order by rand()",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col2 from `user` where 1 != 1",
- "Query": "select col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, '*' expression
-"select * from user where id = 5 order by col"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, qualified '*' expression
-"select user.* from user where id = 5 order by user.col"
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, '*' expression with qualified reference
-"select * from user where id = 5 order by user.col"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, '*' expression in a subquery
-"select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)"
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_col": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
- "Query": "select u.id, u.col from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.col = :u_col",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "u_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
- "Query": "select u.col, u.id from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.col = :u_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Order by, verify outer symtab is searched according to its own context.
-"select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)"
-"symbol u.col not found in subquery"
-{
- "QueryType": "SELECT",
- "Original": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id from `user` as u where 1 != 1",
- "Query": "select u.id from `user` as u where u.id in (select col2 from `user` where `user`.id = u.id order by u.col asc)",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by, qualified '*' expression, name mismatched.
-"select user.* from user where id = 5 order by e.col"
-"symbol e.col not found"
-Gen4 plan same as above
-
-# Order by, invalid column number
-"select col from user order by 18446744073709551616"
-"error parsing column number: 18446744073709551616"
-Gen4 plan same as above
-
-# Order by, out of range column number
-"select col from user order by 2"
-"Unknown column '2' in 'order clause'"
-Gen4 plan same as above
-
-# Order by, '*' expression with qualified reference and using collate
-"select * from user where id = 5 order by user.col collate utf8_general_ci"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by with math functions
-"select * from user where id = 5 order by -col1"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by -col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by -col1 asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by -col1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by -col1 asc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by with string operations
-"select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by with math operations
-"select * from user where id = 5 order by id+col collate utf8_general_ci desc"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#Order by derived table column
-"select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1",
- "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` as u, (select user_id from user_extra where 1 != 1) as eu where 1 != 1",
- "Query": "select * from `user` as u, (select user_id from user_extra where user_id = 5) as eu where u.id = 5 and u.id = eu.user_id order by eu.user_id asc",
- "Table": "`user`, user_extra",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# routing rules: order by gets pushed for routes
-"select col from route1 where id = 1 order by col"
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from route1 where id = 1 order by col",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` as route1 where 1 != 1",
- "Query": "select col from `user` as route1 where id = 1 order by col asc",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# LIMIT
-"select col1 from user where id = 1 limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col1 from user where id = 1 limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user` where id = 1 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1 from user where id = 1 limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user` where id = 1 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# limit for joins. Can't push down the limit because result
-# counts get multiplied by join operations.
-"select user.col from user join user_extra limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# limit for scatter
-"select col from user limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# limit for scatter with bind var
-"select col from user limit :a"
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit :a",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": ":a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user limit :a",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": ":a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# cross-shard expression in parenthesis with limit
-"select * from user where (id1 = 4 AND name1 ='abc') limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# scatter limit after pullout subquery
-"select col from user where col in (select col1 from user) limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col1 from user) limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from user where col in (select col1 from user) limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1 from `user` where 1 != 1",
- "Query": "select col1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# limit on reference table
-"select col from ref limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select col from ref limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref limit 1",
- "Table": "ref"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from ref limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from ref where 1 != 1",
- "Query": "select col from ref limit 1",
- "Table": "ref"
- },
- "TablesUsed": [
- "user.ref"
- ]
-}
-
-# arithmetic limit
-"select id from user limit 1+1"
-{
- "QueryType": "SELECT",
- "Original": "select id from user limit 1+1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user limit 1+1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# order by column alias
-"select id as foo from music order by foo"
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by foo asc",
- "ResultColumns": 1,
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as foo from music order by foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id as foo, weight_string(id) from music order by foo asc",
- "ResultColumns": 1,
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# column alias for a table column in order by
-"select id as foo, id2 as id from music order by id"
-{
- "QueryType": "SELECT",
- "Original": "select id as foo, id2 as id from music order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
- "ResultColumns": 2,
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id as foo, id2 as id from music order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc",
- "ResultColumns": 2,
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# ordering on the left side of the join
-"select name from user, music order by name"
-{
- "QueryType": "SELECT",
- "Original": "select name from user, music order by name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select name from user, music order by name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column without group by
-"select count(id), num from user"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num from `user` where 1 != 1",
- "Query": "select count(id), num from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num from `user` where 1 != 1",
- "Query": "select count(id), num from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column with order by
-"select count(id), num from user order by 2"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user order by 2",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(1|2) ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
- "Query": "select count(id), num, weight_string(num) from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user order by 2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count(id), random(1) AS num",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` order by num asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column with group by
-"select count(id), num from user group by 2"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count(id)",
- "GroupBy": "(1|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# aggregation and non-aggregations column with group by and order by
-"select count(id), num from user group by 2 order by 1"
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2 order by 1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count",
- "GroupBy": "1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(id), num from user group by 2 order by 1",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count(0) AS count(id)",
- "GroupBy": "(1|2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)",
- "OrderBy": "(1|2) ASC",
- "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join order by with ambiguous column reference ; valid in MySQL
-"select name, name from user, music order by name"
-"ambiguous symbol reference: `name`"
-{
- "QueryType": "SELECT",
- "Original": "select name, name from user, music order by name",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:0",
- "TableName": "`user`_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# order by with ambiguous column reference ; valid in MySQL
-"select id, id from user order by id"
-"ambiguous symbol reference: id"
-{
- "QueryType": "SELECT",
- "Original": "select id, id from user order by id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select id, id, weight_string(id) from `user` order by id asc",
- "ResultColumns": 2,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Scatter order by and aggregation: order by column must reference column from select list
-"select col, count(*) from user group by col order by c1"
-"unsupported: memory sort: order by must reference a column in the select list: c1 asc"
-{
- "QueryType": "SELECT",
- "Original": "select col, count(*) from user group by col order by c1",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS count(*), random(2) AS c1",
- "GroupBy": "0",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col, count(*), c1, weight_string(c1) from `user` where 1 != 1 group by col",
- "OrderBy": "(2|3) ASC, 0 ASC",
- "Query": "select col, count(*), c1, weight_string(c1) from `user` group by col order by c1 asc, col asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Distinct with cross shard query
-"select distinct user.a from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.a from user join user_extra",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct user.a from user join user_extra",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|1)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select `user`.a, weight_string(`user`.a) from `user` order by `user`.a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Distinct with column alias
-"select distinct a as c, a from user"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a as c, a from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "0, 1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select distinct a as c, a from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Distinct with same column
-"select distinct a, a from user"
-"generating order by clause: ambiguous symbol reference: a"
-{
- "QueryType": "SELECT",
- "Original": "select distinct a, a from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "GroupBy": "(0|2), (1|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC, (0|2) ASC",
- "Query": "select distinct a, a, weight_string(a) from `user` order by a asc, a asc",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Order by has subqueries
-"select id from unsharded order by (select id from unsharded)"
-"unsupported: subqueries disallowed in GROUP or ORDER BY"
-{
- "QueryType": "SELECT",
- "Original": "select id from unsharded order by (select id from unsharded)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded order by (select id from unsharded) asc",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Equal filter with hexadecimal value
-"select count(*) a from user having a = 0x01"
-"unsupported: filtering on results of aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) a from user having a = 0x01",
- "Instructions": {
- "OperatorType": "Filter",
- "Predicate": ":0 = 0x01",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS a",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as a from `user` where 1 != 1",
- "Query": "select count(*) as a from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.json b/go/vt/vtgate/planbuilder/testdata/rails_cases.json
new file mode 100644
index 00000000000..89fdc4ff059
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/rails_cases.json
@@ -0,0 +1,218 @@
+[
+ {
+ "comment": "Author5.joins(books: [{orders: :customer}, :supplier])",
+ "query": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
+ "JoinVars": {
+ "book6s_supplier5_id": 4
+ },
+ "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4",
+ "JoinVars": {
+ "order2s_customer2_id": 5
+ },
+ "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
+ "JoinVars": {
+ "book6s_order2s_order2_id": 5
+ },
+ "TableName": "author5s, book6s_book6s_order2s_order2s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
+ "JoinVars": {
+ "book6s_id": 5
+ },
+ "TableName": "author5s, book6s_book6s_order2s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1",
+ "Query": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id",
+ "Table": "author5s, book6s"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1",
+ "Query": "select book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id",
+ "Table": "book6s_order2s",
+ "Values": [
+ ":book6s_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1",
+ "Query": "select order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id",
+ "Table": "order2s"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from customer2s where 1 != 1",
+ "Query": "select 1 from customer2s where customer2s.id = :order2s_customer2_id",
+ "Table": "customer2s",
+ "Values": [
+ ":order2s_customer2_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from supplier5s where 1 != 1",
+ "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
+ "Table": "supplier5s",
+ "Values": [
+ ":book6s_supplier5_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:2,R:3",
+ "JoinVars": {
+ "order2s_id": 0
+ },
+ "TableName": "customer2s, order2s_author5s, book6s_book6s_order2s_supplier5s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select order2s.id from order2s, customer2s where 1 != 1",
+ "Query": "select order2s.id from order2s, customer2s where customer2s.id = order2s.customer2_id",
+ "Table": "customer2s, order2s"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:3,L:4",
+ "JoinVars": {
+ "book6s_supplier5_id": 0
+ },
+ "TableName": "author5s, book6s_book6s_order2s_supplier5s",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5",
+ "JoinVars": {
+ "book6s_id": 0
+ },
+ "TableName": "author5s, book6s_book6s_order2s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where 1 != 1",
+ "Query": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where book6s.author5_id = author5s.id",
+ "Table": "author5s, book6s"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from book6s_order2s where 1 != 1",
+ "Query": "select 1 from book6s_order2s where book6s_order2s.book6_id = :book6s_id and book6s_order2s.order2_id = :order2s_id",
+ "Table": "book6s_order2s",
+ "Values": [
+ ":book6s_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from supplier5s where 1 != 1",
+ "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
+ "Table": "supplier5s",
+ "Values": [
+ ":book6s_supplier5_id"
+ ],
+ "Vindex": "binary_md5"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.author5s",
+ "user.book6s",
+ "user.book6s_order2s",
+ "user.customer2s",
+ "user.order2s",
+ "user.supplier5s"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.txt b/go/vt/vtgate/planbuilder/testdata/rails_cases.txt
deleted file mode 100644
index ae4e12d3542..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/rails_cases.txt
+++ /dev/null
@@ -1,214 +0,0 @@
-# Author5.joins(books: [{orders: :customer}, :supplier])
-"select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id"
-{
- "QueryType": "SELECT",
- "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
- "JoinVars": {
- "book6s_supplier5_id": 4
- },
- "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4",
- "JoinVars": {
- "order2s_customer2_id": 5
- },
- "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
- "JoinVars": {
- "book6s_order2s_order2_id": 5
- },
- "TableName": "author5s, book6s_book6s_order2s_order2s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0",
- "JoinVars": {
- "book6s_id": 5
- },
- "TableName": "author5s, book6s_book6s_order2s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1",
- "Query": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id",
- "Table": "author5s, book6s"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1",
- "Query": "select book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id",
- "Table": "book6s_order2s",
- "Values": [
- ":book6s_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1",
- "Query": "select order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id",
- "Table": "order2s"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from customer2s where 1 != 1",
- "Query": "select 1 from customer2s where customer2s.id = :order2s_customer2_id",
- "Table": "customer2s",
- "Values": [
- ":order2s_customer2_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from supplier5s where 1 != 1",
- "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
- "Table": "supplier5s",
- "Values": [
- ":book6s_supplier5_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,R:2,R:3",
- "JoinVars": {
- "order2s_id": 0
- },
- "TableName": "customer2s, order2s_author5s, book6s_book6s_order2s_supplier5s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select order2s.id from order2s, customer2s where 1 != 1",
- "Query": "select order2s.id from order2s, customer2s where customer2s.id = order2s.customer2_id",
- "Table": "customer2s, order2s"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:3,L:4",
- "JoinVars": {
- "book6s_supplier5_id": 0
- },
- "TableName": "author5s, book6s_book6s_order2s_supplier5s",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5",
- "JoinVars": {
- "book6s_id": 0
- },
- "TableName": "author5s, book6s_book6s_order2s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where 1 != 1",
- "Query": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where book6s.author5_id = author5s.id",
- "Table": "author5s, book6s"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from book6s_order2s where 1 != 1",
- "Query": "select 1 from book6s_order2s where book6s_order2s.book6_id = :book6s_id and book6s_order2s.order2_id = :order2s_id",
- "Table": "book6s_order2s",
- "Values": [
- ":book6s_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from supplier5s where 1 != 1",
- "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id",
- "Table": "supplier5s",
- "Values": [
- ":book6s_supplier5_id"
- ],
- "Vindex": "binary_md5"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.author5s",
- "user.book6s",
- "user.book6s_order2s",
- "user.customer2s",
- "user.order2s",
- "user.supplier5s"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json
new file mode 100644
index 00000000000..e1ca1f746d2
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json
@@ -0,0 +1,8066 @@
+[
+ {
+ "comment": "No column referenced",
+ "query": "select 1 from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "'*' expression for simple route",
+ "query": "select user.* from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unqualified '*' expression for simple route",
+ "query": "select * from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select aggregation with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select limit with timeout directive sets QueryTimeout in the route",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
+ "QueryTimeout": 1000,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select with partial scatter directive",
+ "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select aggregation with partial scatter directive",
+ "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select aggregation with partial scatter directive - added comments to try to confuse the hint extraction",
+ "query": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select limit with partial scatter directive",
+ "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
+ "ScatterErrorsAsWarnings": true,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "qualified '*' expression for simple route",
+ "query": "select user.* from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.* from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "fully qualified '*' expression for simple route",
+ "query": "select user.user.* from user.user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.* from user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.user.* from user.user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.* from `user` where 1 != 1",
+ "Query": "select `user`.* from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select * from authoritative table",
+ "query": "select * from authoritative",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
+ "Query": "select user_id, col1, col2 from authoritative",
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
+ "Query": "select user_id, col1, col2 from authoritative",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "select * from join of authoritative tables",
+ "query": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id where 1 != 1",
+ "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id",
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where 1 != 1",
+ "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where a.user_id = b.user_id",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "test table lookup failure for authoritative code path",
+ "query": "select a.* from authoritative",
+ "v3-plan": "table a not found",
+ "gen4-plan": "Unknown table 'a'"
+ },
+ {
+ "comment": "select * from qualified authoritative table",
+ "query": "select a.* from authoritative a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
+ "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
+ "Table": "authoritative"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.* from authoritative a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
+ "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
+ "Table": "authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative"
+ ]
+ }
+ },
+ {
+ "comment": "select * from intermixing of authoritative table with non-authoritative results in no expansion",
+ "query": "select * from authoritative join user on authoritative.user_id=user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative join user on authoritative.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1",
+ "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id",
+ "Table": "authoritative, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from authoritative join user on authoritative.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from authoritative, `user` where 1 != 1",
+ "Query": "select * from authoritative, `user` where authoritative.user_id = `user`.id",
+ "Table": "`user`, authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select authoritative.* with intermixing still expands",
+ "query": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1",
+ "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id",
+ "Table": "authoritative, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where 1 != 1",
+ "Query": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where a.user_id = `user`.id",
+ "Table": "`user`, authoritative"
+ },
+ "TablesUsed": [
+ "user.authoritative",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "auto-resolve anonymous columns for simple route",
+ "query": "select anon_col from user join user_extra on user.id = user_extra.user_id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
+ "Query": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select anon_col from `user`, user_extra where 1 != 1",
+ "Query": "select anon_col from `user`, user_extra where `user`.id = user_extra.user_id",
+ "Table": "`user`, user_extra"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Cannot auto-resolve for cross-shard joins",
+ "query": "select col from user join user_extra",
+ "v3-plan": "symbol col not found",
+ "gen4-plan": "Column 'col' in field list is ambiguous"
+ },
+ {
+ "comment": "Auto-resolve should work if unique vindex columns are referenced",
+ "query": "select id, user_id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, user_id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, user_id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "database calls should be substituted",
+ "query": "select database() from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select database() from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__vtdbname as database()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select database() from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ ":__vtdbname as database()"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "last_insert_id for unsharded route",
+ "query": "select last_insert_id() as x from main.unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() as x from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as x from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id() as x from main.unsharded",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
+ "Query": "select :__lastInsertId as x from unsharded",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select from dual on unqualified keyspace",
+ "query": "select @@session.auto_increment_increment from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "select from pinned table",
+ "query": "select * from pin_test",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from pin_test",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from pin_test where 1 != 1",
+ "Query": "select * from pin_test",
+ "Table": "pin_test",
+ "Values": [
+ "VARCHAR(\"\\x80\")"
+ ],
+ "Vindex": "binary"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from pin_test",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from pin_test where 1 != 1",
+ "Query": "select * from pin_test",
+ "Table": "pin_test",
+ "Values": [
+ "VARCHAR(\"\\x80\")"
+ ],
+ "Vindex": "binary"
+ },
+ "TablesUsed": [
+ "user.pin_test"
+ ]
+ }
+ },
+ {
+ "comment": "select from dual on sharded keyspace",
+ "query": "select @@session.auto_increment_increment from user.dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from user.dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@session.auto_increment_increment from user.dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
+ "Query": "select @@auto_increment_increment from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "user.dual"
+ ]
+ }
+ },
+ {
+ "comment": "RHS route referenced",
+ "query": "select user_extra.id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Both routes referenced",
+ "query": "select user.col, user_extra.id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Expression with single-route reference",
+ "query": "select user.col, user_extra.id + user_extra.col from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.id + user_extra.col from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.id + user_extra.col from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Jumbled references",
+ "query": "select user.col, user_extra.id, user.col2 from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.col2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
+ "Query": "select user_extra.id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Comments",
+ "query": "select /* comment */ user.col from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /* comment */ user.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select /* comment */ `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select /* comment */ 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /* comment */ user.col from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select /* comment */ `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select /* comment */ 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "for update",
+ "query": "select user.col from user join user_extra for update",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra for update",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` for update",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra for update",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col from user join user_extra for update",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` for update",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra for update",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Field query should work for joins select bind vars",
+ "query": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
+ "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
+ "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Case preservation",
+ "query": "select user.Col, user_extra.Id from user join user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.Col, user_extra.Id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.Col from `user` where 1 != 1",
+ "Query": "select `user`.Col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
+ "Query": "select user_extra.Id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.Col, user_extra.Id from user join user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.Col from `user` where 1 != 1",
+ "Query": "select `user`.Col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
+ "Query": "select user_extra.Id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "syntax error",
+ "query": "the quick brown fox",
+ "plan": "syntax error at position 4 near 'the'"
+ },
+ {
+ "comment": "Hex number is not treated as a simple value",
+ "query": "select * from user where id = 0x04",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 0x04",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 0x04",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where id = 0x04",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 0x04",
+ "Table": "`user`",
+ "Values": [
+ "VARBINARY(\"\\x04\")"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sharded limit offset",
+ "query": "select user_id from music order by user_id limit 10, 20",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_id from music order by user_id limit 10, 20",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(20)",
+ "Offset": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_id from music order by user_id limit 10, 20",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(20)",
+ "Offset": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Sharding Key Condition in Parenthesis",
+ "query": "select * from user where name ='abc' AND (id = 4) limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Multiple parenthesized expressions",
+ "query": "select * from user where (id = 4) AND (name ='abc') limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Multiple parenthesized expressions",
+ "query": "select * from user where (id = 4 and name ='abc') limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4 and name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 4 and name ='abc') limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column Aliasing with Table.Column",
+ "query": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column Aliasing with Column",
+ "query": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
+ "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Booleans and parenthesis",
+ "query": "select * from user where (id = 1) AND name = true limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Column as boolean-ish",
+ "query": "select * from user where (id = 1) AND name limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 1) AND name limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1 and `name` limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "PK as fake boolean, and column as boolean-ish",
+ "query": "select * from user where (id = 5) AND name = true limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 5) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user where (id = 5) AND name = true limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 5 and `name` = true limit 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "top level subquery in select",
+ "query": "select a, (select col from user) from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, (select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, (select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "sub-expression subquery in select",
+ "query": "select a, 1+(select col from user) from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, 1+(select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, 1 + :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a, 1+(select col from user) from unsharded",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
+ "Query": "select a, 1 + :__sq1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select * from derived table expands specific columns",
+ "query": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
+ "Query": "select `user`.id as id1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
+ "Query": "select user_extra.id as id2 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
+ "Query": "select `user`.id as id1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
+ "Query": "select user_extra.id as id2 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "duplicate columns not allowed in derived table",
+ "query": "select * from (select user.id, user_extra.id from user join user_extra) as t",
+ "v3-plan": "duplicate column names in subquery: id",
+ "gen4-plan": "Duplicate column name 'id'"
+ },
+ {
+ "comment": "non-existent symbol in cross-shard derived table",
+ "query": "select t.col from (select user.id from user join user_extra) as t",
+ "v3-plan": "symbol t.col not found in table or subquery",
+ "gen4-plan": "symbol t.col not found"
+ },
+ {
+ "comment": "union with the same target shard",
+ "query": "select * from music where user_id = 1 union select * from user where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union with the same target shard last_insert_id",
+ "query": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
+ "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union in derived table",
+ "query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
+ "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.col1, a.col2 from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
+ "Query": "select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union in subquery",
+ "query": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, `name` from unsharded where 1 != 1",
+ "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, `name` from unsharded where 1 != 1",
+ "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "query": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
+ "Table": "unsharded, unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded union",
+ "query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
+ "Table": "unsharded, unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded nested union",
+ "query": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
+ "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
+ "Table": "unsharded, unsharded_auto"
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "main.unsharded_auto"
+ ]
+ }
+ },
+ {
+ "comment": "unsharded nested union with limit",
+ "query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
+ "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
+ "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "routing rules: ensure directives are not lost",
+ "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded as route2 where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
+ "QueryTimeout": 1000,
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded as route2 where 1 != 1",
+ "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
+ "QueryTimeout": 1000,
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "testing SingleRow Projection",
+ "query": "select 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(42) as 42"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(42) as 42"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "don't filter on the vtgate",
+ "query": "select 42 from dual where false",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42 from dual where false",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual where false",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42 from dual where false",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual where false",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "testing SingleRow Projection with arithmetics",
+ "query": "select 42+2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42+2",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(44) as 42 + 2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42+2",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(44) as 42 + 2"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows without limit",
+ "query": "select sql_calc_found_rows * from music where user_id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows with limit",
+ "query": "select sql_calc_found_rows * from music limit 100",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music limit 100",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music limit 100",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows with SelectEqualUnique plans",
+ "query": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1 limit 2",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where user_id = 1 limit 2",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from music where 1 != 1",
+ "Query": "select count(*) from music where user_id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows with group by and having",
+ "query": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
+ "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
+ "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "sql_calc_found_rows in sub queries",
+ "query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)",
+ "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+ },
+ {
+ "comment": "sql_calc_found_rows in derived table",
+ "query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1",
+ "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
+ },
+ {
+ "comment": "select from unsharded keyspace into dumpfile",
+ "query": "select * from main.unsharded into Dumpfile 'x.txt'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into dumpfile 'x.txt'",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into dumpfile 'x.txt'",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select from unsharded keyspace into outfile",
+ "query": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select from unsharded keyspace into outfile s3",
+ "query": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "Union after into outfile is incorrect",
+ "query": "select id from user into outfile 'out_file_name' union all select id from music",
+ "plan": "syntax error at position 55 near 'union'"
+ },
+ {
+ "comment": "Into outfile s3 in derived table is incorrect",
+ "query": "select id from (select id from user into outfile s3 'inner_outfile') as t2",
+ "plan": "syntax error at position 41 near 'into'"
+ },
+ {
+ "comment": "Into outfile s3 in derived table with union incorrect",
+ "query": "select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2",
+ "plan": "syntax error at position 41 near 'into'"
+ },
+ {
+ "comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "query": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
+ "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
+ "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Add two tables with the same column in a join",
+ "query": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`, user_extra_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id where 1 != 1",
+ "Query": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id",
+ "Table": "`user`, user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1",
+ "TableName": "unsharded_`user`, user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded where 1 != 1",
+ "Query": "select 1 from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select t.id, s.id from `user` as t, user_extra as s where 1 != 1",
+ "Query": "select t.id, s.id from `user` as t, user_extra as s where t.id = s.user_id",
+ "Table": "`user`, user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "((((select 1))))",
+ "query": "((((select 1))))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "((((select 1))))",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "((((select 1))))",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "INT64(1) as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Merging dual with user",
+ "query": "select 42, id from dual, user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, id from dual, user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "dual_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, id from dual, user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 42, id from dual, `user` where 1 != 1",
+ "Query": "select 42, id from dual, `user`",
+ "Table": "`user`, dual"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Table named \"dual\" with a qualifier joined on user should not be merged",
+ "query": "select 42, user.id from main.dual, user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, user.id from main.dual, user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "dual_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 42, user.id from main.dual, user",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "dual_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 42 from dual where 1 != 1",
+ "Query": "select 42 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "query": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "query": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
+ "Query": "select :__sq1 as a from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
+ "Query": "select :__sq1 as a from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra",
+ "query": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra",
+ "plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "plan test for a natural character set string",
+ "query": "select N'string' from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select N'string' from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "VARCHAR(\"string\") as N'string'"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select N'string' from dual",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "VARCHAR(\"string\") as N'string'"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "select expression having dependencies on both sides of a join",
+ "query": "select user.id * user_id as amount from user, user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id * user_id as amount from user, user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
+ "Query": "select :user_id * user_id as amount from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id * user_id as amount from user, user_extra",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
+ "Query": "select :user_id * user_id as amount from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery in exists clause",
+ "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "ProjectedIndexes": "-2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
+ "Query": "select `user`.id, col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery in exists clause with an order by",
+ "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "user_id": 0
+ },
+ "ProjectedIndexes": "-2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
+ "OrderBy": "1 ASC",
+ "Query": "select `user`.id, col from `user` order by col asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id",
+ "Table": "user_extra",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery having dependencies on two tables",
+ "query": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "u1_col": 0,
+ "u2_col": 1
+ },
+ "ProjectedIndexes": "-3",
+ "TableName": "`user`_`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1",
+ "Query": "select u1.col, 1 from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
+ "Query": "select u2.col from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery using a column twice",
+ "query": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
+ "Instructions": {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "ProjectedIndexes": "-2",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1",
+ "Query": "select u.col, 1 from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
+ "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "correlated subquery part of an OR clause",
+ "query": "select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
+ "v3-plan": "unsupported: cross-shard correlated subquery",
+ "gen4-plan": "exists sub-queries are only supported with AND clause"
+ },
+ {
+ "comment": "correlated subquery that is dependent on one side of a join, fully mergeable",
+ "query": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)",
+ "Table": "music, `user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id",
+ "Table": "`user`, music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union as a derived table",
+ "query": "select found from (select id as found from user union all (select id from unsharded)) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as found from `user` where 1 != 1",
+ "Query": "select id as found from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id as found from `user` where 1 != 1",
+ "Query": "select id as found from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from unsharded where 1 != 1",
+ "Query": "select id from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "use output column containing data from both sides of the join",
+ "query": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_col": 0,
+ "user_id": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
+ "Query": "select `user`.col, `user`.id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col + :user_col from user_extra where 1 != 1",
+ "Query": "select user_extra.col + :user_col from user_extra where user_extra.id = :user_id",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "user_extra_col": 1,
+ "user_extra_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.id, user_extra.col from user_extra where 1 != 1",
+ "Query": "select user_extra.id, user_extra.col from user_extra",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1",
+ "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id",
+ "Table": "`user`",
+ "Values": [
+ ":user_extra_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable derived table with order by and limit",
+ "query": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (select col from unsharded where 1 != 1) as f left join unsharded as u on f.col = u.id where 1 != 1",
+ "Query": "select 1 from (select col from unsharded order by unsharded.col1 desc limit 0, 12) as f left join unsharded as u on f.col = u.id",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "mergeable derived table with group by and limit",
+ "query": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1",
+ "Query": "select 1 from (select col, count(*) as a from unsharded group by col having count(*) > 0 limit 0, 12) as f left join unsharded as u on f.col = u.id",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "select user.id, trim(leading 'x' from user.name) from user",
+ "query": "select user.id, trim(leading 'x' from user.name) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, trim(leading 'x' from user.name) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.id, trim(leading 'x' from user.name) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "json utility functions",
+ "query": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
+ "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
+ "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "dual query with exists clause",
+ "query": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where information_schema.`TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and information_schema.`TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
+ "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and `TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
+ "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES",
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "json_quote, json_object and json_array",
+ "query": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
+ "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
+ "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "select (select id from user order by id limit 1) from user_extra",
+ "query": "select (select id from user order by id limit 1) from user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select id from user order by id limit 1) from user_extra",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
+ "Query": "select :__sq1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select id from user order by id limit 1) from user_extra",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
+ "Query": "select :__sq1 from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "yeah, it does not make sense, but it's valid",
+ "query": "select exists(select 1) from user where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select 1) from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual limit 1",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq_has_values1 from `user` where 1 != 1",
+ "Query": "select :__sq_has_values1 from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select 1) from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1",
+ "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "json schema validation functions",
+ "query": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "json search functions",
+ "query": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
+ "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
+ "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Json extract and json unquote shorthands",
+ "query": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1",
+ "Query": "select a -> '$[4]', a ->> '$[3]' from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1",
+ "Query": "select a -> '$[4]', a ->> '$[3]' from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "group by with non aggregated columns and table alias",
+ "query": "select u.id, u.age from user u group by u.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.age from user u group by u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
+ "Query": "select u.id, u.age from `user` as u group by u.id",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, u.age from user u group by u.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
+ "Query": "select u.id, u.age from `user` as u group by u.id",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Functions that return JSON value attributes",
+ "query": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
+ "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
+ "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Json array functions",
+ "query": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
+ "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
+ "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Json merge functions",
+ "query": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
+ "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
+ "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "JSON modifier functions",
+ "query": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
+ "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Reference with a subquery which can be merged",
+ "query": "select exists(select id from user where id = 4)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select id from user where id = 4)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where id = 4 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select id from user where id = 4)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(4)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Reference with a subquery which cannot be merged",
+ "query": "select exists(select * from user)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` limit :__upper_limit",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "insert function not requiring any table",
+ "query": "select insert('Quadratic', 3, 4, 'What')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert('Quadratic', 3, 4, 'What')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
+ "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert('Quadratic', 3, 4, 'What')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
+ "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "insert function using column names as arguments",
+ "query": "select insert(tcol1, id, 3, tcol2) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert(tcol1, id, 3, tcol2) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
+ "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select insert(tcol1, id, 3, tcol2) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
+ "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "gtid functions",
+ "query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
+ "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
+ "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "Predicate in apply join which is merged",
+ "query": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_extra_user_id": 1
+ },
+ "TableName": "`user`_user_extra_user_metadata",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
+ "Query": "select user_extra.user_id from user_extra where user_extra.col = :user_col",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_metadata.user_id from user_metadata where 1 != 1",
+ "Query": "select user_metadata.user_id from user_metadata where user_metadata.user_id = :user_extra_user_id",
+ "Table": "user_metadata",
+ "Values": [
+ ":user_extra_user_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_col": 0
+ },
+ "TableName": "`user`_user_extra, user_metadata",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_metadata.user_id from user_extra, user_metadata where 1 != 1",
+ "Query": "select user_metadata.user_id from user_extra, user_metadata where user_extra.col = :user_col and user_extra.user_id = user_metadata.user_id",
+ "Table": "user_extra, user_metadata"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra",
+ "user.user_metadata"
+ ]
+ }
+ },
+ {
+ "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates",
+ "query": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where 1 != 1",
+ "Query": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where `user`.id = 123 and music.id = 456",
+ "Table": "`user`, music_extra, music",
+ "Values": [
+ "INT64(123)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id from `user`, music_extra, music where 1 != 1",
+ "Query": "select `user`.id from `user`, music_extra, music where music.id = 456 and `user`.id = 123 and `user`.id = music_extra.user_id and music_extra.user_id = music.user_id",
+ "Table": "`user`, music, music_extra",
+ "Values": [
+ "INT64(123)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.music_extra",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "SQL_CALC_FOUND_ROWS with vindex lookup",
+ "query": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where `name` = 'aa'",
+ "Table": "`user`",
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
+ "Instructions": {
+ "OperatorType": "SQL_CALC_FOUND_ROWS",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(2)",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|2) ASC",
+ "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
+ "ResultColumns": 2,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS count(*)",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "VARCHAR(\"aa\")"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from `user` where 1 != 1",
+ "Query": "select count(*) from `user` where `name` = 'aa'",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "`None` route being merged with another route via join predicate on Vindex columns",
+ "query": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
+ "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id in (null) and `user`.id = 5",
+ "Table": "music, `user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, `user` where 1 != 1",
+ "Query": "select music.id from music, `user` where music.user_id in (null) and `user`.id = 5 and music.user_id = `user`.id",
+ "Table": "`user`, music"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Treating single value tuples as `EqualUnique` routes",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(5))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with derived table",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select music.id from music where 1 != 1) as _inner where 1 != 1",
+ "Query": "select * from (select music.id from music where music.user_id in ::__vals) as _inner",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.foo = 'bar'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "(INT64(3), INT64(4), INT64(5))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(3), INT64(4), INT64(5))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(1), INT64(2), INT64(3))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) or music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`IN` comparison on Vindex with `None` subquery, as routing predicate",
+ "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`IN` comparison on Vindex with `None` subquery, as non-routing predicate",
+ "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable scatter subquery",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.genre = 'pop'",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1 group by music.id",
+ "Query": "select music.id from music where music.genre = 'pop' group by music.id",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable scatter subquery with `GROUP BY` on non-vindex column",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)",
+ "v3-plan": "unsupported: in scatter query: group by column must reference column in SELECT list",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "random(0) AS id",
+ "GroupBy": "(1|2)",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)",
+ "OrderBy": "(1|2) ASC",
+ "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable scatter subquery with LIMIT",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id",
+ "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable subquery with `MAX` aggregate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0) AS max(music.id)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id in ::__vals",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with `LIMIT` due to `EqualUnique` route",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(music.id) from music where 1 != 1",
+ "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with multiple levels of derived statements",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select * from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1",
+ "Query": "select * from (select * from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit)",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
+ "Table": "music",
+ "Values": [
+ "(INT64(5))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in (5) limit 10) as subquery_for_limit) as subquery_for_limit)",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6))"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Unmergeable subquery with multiple levels of derived statements",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music limit :__upper_limit",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
+ "Table": "music",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null))",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge",
+ "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "`None` subquery nested inside `OR` expression - outer query keeps routing information",
+ "query": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "None",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.user_id in (null)",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
+ "Table": "music"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
+ "Table": "music"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together",
+ "query": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "music_id": 0
+ },
+ "TableName": "music_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
+ "Query": "select 1 from (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = :music_id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
+ "Query": "select music.id from music, (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = music.id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Joining with a subquery that uses an `EqualUnique` route can be merged",
+ "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
+ "Query": "select music.id from music join (select id from music where music.user_id = 5) as other on other.id = music.id",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
+ "Query": "select music.id from music, (select id from music where music.user_id = 5) as other where other.id = music.id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Joining with a subquery that has an `IN` route can be merged",
+ "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
+ "Query": "select music.id from music join (select id from music where music.user_id in (5, 6, 7)) as other on other.id = music.id",
+ "Table": "music"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
+ "Query": "select music.id from music, (select id from music where music.user_id in ::__vals) as other where other.id = music.id",
+ "Table": "music",
+ "Values": [
+ "(INT64(5), INT64(6), INT64(7))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "limit on the vtgate has to be executed on the LHS of a join",
+ "query": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "ue_user_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where `user`.id = :ue_user_id",
+ "Table": "`user`",
+ "Values": [
+ ":ue_user_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Join with a derived table that aggregates with multiple group by columns",
+ "query": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "t_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS b",
+ "GroupBy": "(0|3), (2|4)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra where 1 != 1 group by id, weight_string(id), req, weight_string(req)",
+ "OrderBy": "(0|3) ASC, (2|4) ASC",
+ "Query": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra group by id, weight_string(id), req, weight_string(req) order by id asc, req asc",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.a from `user` where 1 != 1",
+ "Query": "select `user`.a from `user` where `user`.id = :t_id",
+ "Table": "`user`",
+ "Values": [
+ ":t_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "cant switch sides for outer joins",
+ "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
+ "plan": "unsupported: LEFT JOIN not supported for derived tables"
+ },
+ {
+ "comment": "limit on both sides means that we can't evaluate this at all",
+ "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": "unsupported: JOIN not supported between derived tables"
+ },
+ {
+ "comment": "Aggregate in a derived table on the LHS of a join with an `EqualUnique` route can be merged",
+ "query": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "other_maxt": 0
+ },
+ "TableName": "music_music",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select other.maxt from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
+ "Query": "select other.maxt from (select max(id) as maxt from music where music.user_id = 5) as other",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from music where 1 != 1",
+ "Query": "select music.id from music where music.id = :other_maxt",
+ "Table": "music",
+ "Values": [
+ ":other_maxt"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1",
+ "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id",
+ "Table": "music",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246",
+ "query": "SELECT 1 as x, (SELECT x)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT 1 as x, (SELECT x)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
+ "Query": "select 1 as x, (select x from dual) from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT 1 as x, (SELECT x)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
+ "Query": "select 1 as x, (select x from dual) from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "query with a derived table and dual table in unsharded keyspace",
+ "query": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1",
+ "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1",
+ "Table": "unsharded_a, dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1",
+ "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1",
+ "Table": "dual, unsharded_a"
+ },
+ "TablesUsed": [
+ "main.dual",
+ "main.unsharded_a"
+ ]
+ }
+ },
+ {
+ "comment": "subquery having join table on clause, using column reference of outer select table",
+ "query": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1",
+ "v3-plan": "symbol u3.id not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select (select 1 from `user` as u1 join `user` as u2 on u1.id = u2.id and u1.id = u3.id where 1 != 1) as subquery from `user` as u3 where 1 != 1",
+ "Query": "select (select 1 from `user` as u1 join `user` as u2 on u1.id = u2.id and u1.id = u3.id) as subquery from `user` as u3 where u3.id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "allow last_insert_id with argument",
+ "query": "select last_insert_id(id) from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id(id) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select last_insert_id(id) from `user` where 1 != 1",
+ "Query": "select last_insert_id(id) from `user`",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select last_insert_id(id) from user",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select last_insert_id(id) from `user` where 1 != 1",
+ "Query": "select last_insert_id(id) from `user`",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "column with qualifier is correctly used",
+ "query": "select u.foo, ue.foo as apa from user u, user_extra ue order by foo ",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.foo, ue.foo as apa from user u, user_extra ue order by foo ",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.foo, weight_string(u.foo) from `user` as u where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select u.foo, weight_string(u.foo) from `user` as u order by foo asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.foo as apa from user_extra as ue where 1 != 1",
+ "Query": "select ue.foo as apa from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.foo, ue.foo as apa from user u, user_extra ue order by foo ",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.foo, weight_string(u.foo) from `user` as u where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select u.foo, weight_string(u.foo) from `user` as u order by foo asc",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.foo as apa from user_extra as ue where 1 != 1",
+ "Query": "select ue.foo as apa from user_extra as ue",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.txt b/go/vt/vtgate/planbuilder/testdata/select_cases.txt
deleted file mode 100644
index 3072533565c..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/select_cases.txt
+++ /dev/null
@@ -1,7732 +0,0 @@
-# No column referenced
-"select 1 from user"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# '*' expression for simple route
-"select user.* from user"
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# unqualified '*' expression for simple route
-"select * from user"
-{
- "QueryType": "SELECT",
- "Original": "select * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select with timeout directive sets QueryTimeout in the route
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select aggregation with timeout directive sets QueryTimeout in the route
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select limit with timeout directive sets QueryTimeout in the route
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit",
- "QueryTimeout": 1000,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select with partial scatter directive
-"select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select aggregation with partial scatter directive
-"select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select aggregation with partial scatter directive - added comments to try to confuse the hint extraction
-"/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user"
-{
- "QueryType": "SELECT",
- "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select limit with partial scatter directive
-"select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit",
- "ScatterErrorsAsWarnings": true,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# qualified '*' expression for simple route
-"select user.* from user"
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.* from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# fully qualified '*' expression for simple route
-"select user.user.* from user.user"
-{
- "QueryType": "SELECT",
- "Original": "select user.user.* from user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.user.* from user.user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.* from `user` where 1 != 1",
- "Query": "select `user`.* from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# select * from authoritative table
-"select * from authoritative"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
- "Query": "select user_id, col1, col2 from authoritative",
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1",
- "Query": "select user_id, col1, col2 from authoritative",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# select * from join of authoritative tables
-"select * from authoritative a join authoritative b on a.user_id=b.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id where 1 != 1",
- "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id",
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where 1 != 1",
- "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where a.user_id = b.user_id",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# test table lookup failure for authoritative code path
-"select a.* from authoritative"
-"table a not found"
-Gen4 error: Unknown table 'a'
-
-# select * from qualified authoritative table
-"select a.* from authoritative a"
-{
- "QueryType": "SELECT",
- "Original": "select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
- "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
- "Table": "authoritative"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a.* from authoritative a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1",
- "Query": "select a.user_id, a.col1, a.col2 from authoritative as a",
- "Table": "authoritative"
- },
- "TablesUsed": [
- "user.authoritative"
- ]
-}
-
-# select * from intermixing of authoritative table with non-authoritative results in no expansion
-"select * from authoritative join user on authoritative.user_id=user.id"
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative join user on authoritative.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1",
- "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id",
- "Table": "authoritative, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from authoritative join user on authoritative.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from authoritative, `user` where 1 != 1",
- "Query": "select * from authoritative, `user` where authoritative.user_id = `user`.id",
- "Table": "`user`, authoritative"
- },
- "TablesUsed": [
- "user.authoritative",
- "user.user"
- ]
-}
-
-# select authoritative.* with intermixing still expands
-"select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1",
- "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id",
- "Table": "authoritative, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where 1 != 1",
- "Query": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where a.user_id = `user`.id",
- "Table": "`user`, authoritative"
- },
- "TablesUsed": [
- "user.authoritative",
- "user.user"
- ]
-}
-
-# auto-resolve anonymous columns for simple route
-"select anon_col from user join user_extra on user.id = user_extra.user_id"
-{
- "QueryType": "SELECT",
- "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1",
- "Query": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select anon_col from `user`, user_extra where 1 != 1",
- "Query": "select anon_col from `user`, user_extra where `user`.id = user_extra.user_id",
- "Table": "`user`, user_extra"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Cannot auto-resolve for cross-shard joins
-"select col from user join user_extra"
-"symbol col not found"
-Gen4 error: Column 'col' in field list is ambiguous
-
-# Auto-resolve should work if unique vindex columns are referenced
-"select id, user_id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select id, user_id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, user_id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# database calls should be substituted
-"select database() from dual"
-{
- "QueryType": "SELECT",
- "Original": "select database() from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__vtdbname as database()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select database() from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- ":__vtdbname as database()"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# last_insert_id for unsharded route
-"select last_insert_id() as x from main.unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() as x from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as x from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select last_insert_id() as x from main.unsharded",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1",
- "Query": "select :__lastInsertId as x from unsharded",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# select from dual on unqualified keyspace
-"select @@session.auto_increment_increment from dual"
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# select from pinned table
-"select * from pin_test"
-{
- "QueryType": "SELECT",
- "Original": "select * from pin_test",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from pin_test where 1 != 1",
- "Query": "select * from pin_test",
- "Table": "pin_test",
- "Values": [
- "VARCHAR(\"\\x80\")"
- ],
- "Vindex": "binary"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from pin_test",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from pin_test where 1 != 1",
- "Query": "select * from pin_test",
- "Table": "pin_test",
- "Values": [
- "VARCHAR(\"\\x80\")"
- ],
- "Vindex": "binary"
- },
- "TablesUsed": [
- "user.pin_test"
- ]
-}
-
-# select from dual on sharded keyspace
-"select @@session.auto_increment_increment from user.dual"
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from user.dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select @@session.auto_increment_increment from user.dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1",
- "Query": "select @@auto_increment_increment from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "user.dual"
- ]
-}
-
-# RHS route referenced
-"select user_extra.id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Both routes referenced
-"select user.col, user_extra.id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Expression with single-route reference
-"select user.col, user_extra.id + user_extra.col from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.id + user_extra.col from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.id + user_extra.col from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Jumbled references
-"select user.col, user_extra.id, user.col2 from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_extra.id, user.col2 from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.col2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id from user_extra where 1 != 1",
- "Query": "select user_extra.id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Comments
-"select /* comment */ user.col from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select /* comment */ user.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select /* comment */ `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select /* comment */ 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /* comment */ user.col from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select /* comment */ `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select /* comment */ 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# for update
-"select user.col from user join user_extra for update"
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra for update",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` for update",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra for update",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col from user join user_extra for update",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` for update",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra for update",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Field query should work for joins select bind vars
-"select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
- "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1",
- "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# Case preservation
-"select user.Col, user_extra.Id from user join user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.Col, user_extra.Id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.Col from `user` where 1 != 1",
- "Query": "select `user`.Col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
- "Query": "select user_extra.Id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.Col, user_extra.Id from user join user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.Col from `user` where 1 != 1",
- "Query": "select `user`.Col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.Id from user_extra where 1 != 1",
- "Query": "select user_extra.Id from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# syntax error
-"the quick brown fox"
-"syntax error at position 4 near 'the'"
-Gen4 plan same as above
-
-# Hex number is not treated as a simple value
-"select * from user where id = 0x04"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 0x04",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 0x04",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where id = 0x04",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 0x04",
- "Table": "`user`",
- "Values": [
- "VARBINARY(\"\\x04\")"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# sharded limit offset
-"select user_id from music order by user_id limit 10, 20"
-{
- "QueryType": "SELECT",
- "Original": "select user_id from music order by user_id limit 10, 20",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(20)",
- "Offset": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_id from music order by user_id limit 10, 20",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(20)",
- "Offset": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Sharding Key Condition in Parenthesis
-"select * from user where name ='abc' AND (id = 4) limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where name ='abc' AND (id = 4) limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Multiple parenthesized expressions
-"select * from user where (id = 4) AND (name ='abc') limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4) AND (name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Multiple parenthesized expressions
-"select * from user where (id = 4 and name ='abc') limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4 and name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 4 and name ='abc') limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column Aliasing with Table.Column
-"select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column Aliasing with Column
-"select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3"
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1",
- "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Booleans and parenthesis
-"select * from user where (id = 1) AND name = true limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Column as boolean-ish
-"select * from user where (id = 1) AND name limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 1) AND name limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1 and `name` limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# PK as fake boolean, and column as boolean-ish
-"select * from user where (id = 5) AND name = true limit 5"
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 5) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user where (id = 5) AND name = true limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 5 and `name` = true limit 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# top level subquery in select
-"select a, (select col from user) from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select a, (select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
- "Query": "select a, :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, (select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1",
- "Query": "select a, :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# sub-expression subquery in select
-"select a, 1+(select col from user) from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select a, 1+(select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
- "Query": "select a, 1 + :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a, 1+(select col from user) from unsharded",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1",
- "Query": "select a, 1 + :__sq1 from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# select * from derived table expands specific columns
-"select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
- "Query": "select `user`.id as id1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
- "Query": "select user_extra.id as id2 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1",
- "Query": "select `user`.id as id1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1",
- "Query": "select user_extra.id as id2 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# duplicate columns not allowed in derived table
-"select * from (select user.id, user_extra.id from user join user_extra) as t"
-"duplicate column names in subquery: id"
-Gen4 error: Duplicate column name 'id'
-
-# non-existent symbol in cross-shard derived table
-"select t.col from (select user.id from user join user_extra) as t"
-"symbol t.col not found in table or subquery"
-Gen4 error: symbol t.col not found
-
-# union with the same target shard
-"select * from music where user_id = 1 union select * from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select * from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union with the same target shard last_insert_id
-"select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1",
- "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# unsharded union in derived table
-"select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
- "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.col1, a.col2 from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1",
- "Query": "select a.col1, a.col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# unsharded union in subquery
-"select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)"
-{
- "QueryType": "SELECT",
- "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, `name` from unsharded where 1 != 1",
- "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id, `name` from unsharded where 1 != 1",
- "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-"(select id from unsharded) union (select id from unsharded_auto) order by id limit 5"
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5",
- "Table": "unsharded, unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# unsharded union
-"select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)"
-{
- "QueryType": "SELECT",
- "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)",
- "Table": "unsharded, unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# unsharded nested union
-"(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1",
- "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded",
- "Table": "unsharded, unsharded_auto"
- },
- "TablesUsed": [
- "main.unsharded",
- "main.unsharded_auto"
- ]
-}
-
-# unsharded nested union with limit
-"(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1"
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
- "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)",
- "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# routing rules: ensure directives are not lost
-"select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded as route2 where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
- "QueryTimeout": 1000,
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded as route2 where 1 != 1",
- "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2",
- "QueryTimeout": 1000,
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# testing SingleRow Projection
-"select 42"
-{
- "QueryType": "SELECT",
- "Original": "select 42",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(42) as 42"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(42) as 42"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# don't filter on the vtgate
-"select 42 from dual where false"
-{
- "QueryType": "SELECT",
- "Original": "select 42 from dual where false",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual where false",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42 from dual where false",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual where false",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# testing SingleRow Projection with arithmetics
-"select 42+2"
-{
- "QueryType": "SELECT",
- "Original": "select 42+2",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(44) as 42 + 2"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42+2",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(44) as 42 + 2"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# sql_calc_found_rows without limit
-"select sql_calc_found_rows * from music where user_id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows with limit
-"select sql_calc_found_rows * from music limit 100"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music limit 100",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music limit 100",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music",
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows with SelectEqualUnique plans
-"select sql_calc_found_rows * from music where user_id = 1 limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1 limit 2",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where user_id = 1 limit 2",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from music where 1 != 1",
- "Query": "select count(*) from music where user_id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows with group by and having
-"select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
- "OrderBy": "(0|2) ASC",
- "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
- "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id",
- "OrderBy": "(0|2) ASC",
- "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1",
- "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t",
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# sql_calc_found_rows in sub queries
-"select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)"
-"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
-Gen4 plan same as above
-
-# sql_calc_found_rows in derived table
-"select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1"
-"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'"
-Gen4 plan same as above
-
-# select from unsharded keyspace into dumpfile
-"select * from main.unsharded into Dumpfile 'x.txt'"
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into dumpfile 'x.txt'",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into Dumpfile 'x.txt'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into dumpfile 'x.txt'",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# select from unsharded keyspace into outfile
-"select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'"
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# select from unsharded keyspace into outfile s3
-"select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off"
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# Union after into outfile is incorrect
-"select id from user into outfile 'out_file_name' union all select id from music"
-"syntax error at position 55 near 'union'"
-Gen4 plan same as above
-
-# Into outfile s3 in derived table is incorrect
-"select id from (select id from user into outfile s3 'inner_outfile') as t2"
-"syntax error at position 41 near 'into'"
-Gen4 plan same as above
-
-# Into outfile s3 in derived table with union incorrect
-"select id from (select id from user into outfile s3 'inner_outfile' union select 1) as t2"
-"syntax error at position 41 near 'into'"
-Gen4 plan same as above
-
-"select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
- "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1",
- "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Add two tables with the same column in a join
-"select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`, user_extra_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id where 1 != 1",
- "Query": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id",
- "Table": "`user`, user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1",
- "TableName": "unsharded_`user`, user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded where 1 != 1",
- "Query": "select 1 from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select t.id, s.id from `user` as t, user_extra as s where 1 != 1",
- "Query": "select t.id, s.id from `user` as t, user_extra as s where t.id = s.user_id",
- "Table": "`user`, user_extra"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-"((((select 1))))"
-{
- "QueryType": "SELECT",
- "Original": "((((select 1))))",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "((((select 1))))",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "INT64(1) as 1"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Merging dual with user
-"select 42, id from dual, user"
-{
- "QueryType": "SELECT",
- "Original": "select 42, id from dual, user",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "dual_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42, id from dual, user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 42, id from dual, `user` where 1 != 1",
- "Query": "select 42, id from dual, `user`",
- "Table": "`user`, dual"
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# Table named "dual" with a qualifier joined on user should not be merged
-"select 42, user.id from main.dual, user"
-{
- "QueryType": "SELECT",
- "Original": "select 42, user.id from main.dual, user",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "dual_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 42, user.id from main.dual, user",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "dual_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 42 from dual where 1 != 1",
- "Query": "select 42 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-"select (select col from user limit 1) as a from user join user_extra order by a"
-{
- "QueryType": "SELECT",
- "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (select col from user limit 1) as a from user join user_extra order by a",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select t.a from (select (select col from user limit 1) as a from user join user_extra) t"
-{
- "QueryType": "SELECT",
- "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
- "Query": "select :__sq1 as a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 as a from `user` where 1 != 1",
- "Query": "select :__sq1 as a from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra"
-"unsupported: cross-shard correlated subquery"
-Gen4 plan same as above
-
-# plan test for a natural character set string
-"select N'string' from dual"
-{
- "QueryType": "SELECT",
- "Original": "select N'string' from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "VARCHAR(\"string\") as N'string'"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select N'string' from dual",
- "Instructions": {
- "OperatorType": "Projection",
- "Expressions": [
- "VARCHAR(\"string\") as N'string'"
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# select expression having dependencies on both sides of a join
-"select user.id * user_id as amount from user, user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select user.id * user_id as amount from user, user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
- "Query": "select :user_id * user_id as amount from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id * user_id as amount from user, user_extra",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_id": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` where 1 != 1",
- "Query": "select `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1",
- "Query": "select :user_id * user_id as amount from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery in exists clause
-"select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id)",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_id": 0
- },
- "ProjectedIndexes": "-2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
- "Query": "select `user`.id, col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id \u003c :user_id",
- "Table": "user_extra",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery in exists clause with an order by
-"select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by col"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id \u003c user.id) order by col",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "user_id": 0
- },
- "ProjectedIndexes": "-2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, col from `user` where 1 != 1",
- "OrderBy": "1 ASC",
- "Query": "select `user`.id, col from `user` order by col asc",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_id = 3 and user_id \u003c :user_id",
- "Table": "user_extra",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery having dependencies on two tables
-"select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "u1_col": 0,
- "u2_col": 1
- },
- "ProjectedIndexes": "-3",
- "TableName": "`user`_`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1",
- "Query": "select u1.col, 1 from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
- "Query": "select u2.col from `user` as u2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery using a column twice
-"select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)"
-"unsupported: cross-shard correlated subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)",
- "Instructions": {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "u_col": 0
- },
- "ProjectedIndexes": "-2",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1",
- "Query": "select u.col, 1 from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra as ue where 1 != 1",
- "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# correlated subquery part of an OR clause
-"select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)"
-"unsupported: cross-shard correlated subquery"
-Gen4 error: exists sub-queries are only supported with AND clause
-
-# correlated subquery that is dependent on one side of a join, fully mergeable
-"SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)",
- "Table": "music, `user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id",
- "Table": "`user`, music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union as a derived table
-"select found from (select id as found from user union all (select id from unsharded)) as t"
-{
- "QueryType": "SELECT",
- "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as found from `user` where 1 != 1",
- "Query": "select id as found from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select found from (select id as found from user union all (select id from unsharded)) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id as found from `user` where 1 != 1",
- "Query": "select id as found from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from unsharded where 1 != 1",
- "Query": "select id from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# use output column containing data from both sides of the join
-"select user_extra.col + user.col from user join user_extra on user.id = user_extra.id"
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_col": 0,
- "user_id": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1",
- "Query": "select `user`.col, `user`.id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.col + :user_col from user_extra where 1 != 1",
- "Query": "select user_extra.col + :user_col from user_extra where user_extra.id = :user_id",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "user_extra_col": 1,
- "user_extra_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.id, user_extra.col from user_extra where 1 != 1",
- "Query": "select user_extra.id, user_extra.col from user_extra",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1",
- "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id",
- "Table": "`user`",
- "Values": [
- ":user_extra_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# mergeable derived table with order by and limit
-"select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (select col from unsharded where 1 != 1) as f left join unsharded as u on f.col = u.id where 1 != 1",
- "Query": "select 1 from (select col from unsharded order by unsharded.col1 desc limit 0, 12) as f left join unsharded as u on f.col = u.id",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-# mergeable derived table with group by and limit
-"select 1 from (select col, count(*) as a from main.unsharded group by col having a \u003e 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select col, count(*) as a from main.unsharded group by col having a \u003e 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1",
- "Query": "select 1 from (select col, count(*) as a from unsharded group by col having count(*) \u003e 0 limit 0, 12) as f left join unsharded as u on f.col = u.id",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-Gen4 plan same as above
-
-"select user.id, trim(leading 'x' from user.name) from user"
-{
- "QueryType": "SELECT",
- "Original": "select user.id, trim(leading 'x' from user.name) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.id, trim(leading 'x' from user.name) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# json utility functions
-"select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user"
-{
- "QueryType": "SELECT",
- "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
- "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1",
- "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# dual query with exists clause
-"select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where information_schema.`TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and information_schema.`TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
- "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
- "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and `TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)",
- "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]",
- "SysTableTableSchema": "[VARCHAR(\"mysql\")]",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# json_quote, json_object and json_array
-"SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())"
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
- "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1",
- "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-"select (select id from user order by id limit 1) from user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select (select id from user order by id limit 1) from user_extra",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
- "Query": "select :__sq1 from user_extra",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select (select id from user order by id limit 1) from user_extra",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq1 from user_extra where 1 != 1",
- "Query": "select :__sq1 from user_extra",
- "Table": "user_extra"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# yeah, it does not make sense, but it's valid
-"select exists(select 1) from user where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select 1) from user where id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual limit 1",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq_has_values1 from `user` where 1 != 1",
- "Query": "select :__sq_has_values1 from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select 1) from user where id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1",
- "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# json schema validation functions
-"SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# json search functions
-"SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
- "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1",
- "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Json extract and json unquote shorthands
-"SELECT a-\u003e\"$[4]\", a-\u003e\u003e\"$[3]\" FROM user"
-{
- "QueryType": "SELECT",
- "Original": "SELECT a-\u003e\"$[4]\", a-\u003e\u003e\"$[3]\" FROM user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user` where 1 != 1",
- "Query": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT a-\u003e\"$[4]\", a-\u003e\u003e\"$[3]\" FROM user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user` where 1 != 1",
- "Query": "select a -\u003e '$[4]', a -\u003e\u003e '$[3]' from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# groupe by with non aggregated columns and table alias
-"select u.id, u.age from user u group by u.id"
-{
- "QueryType": "SELECT",
- "Original": "select u.id, u.age from user u group by u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
- "Query": "select u.id, u.age from `user` as u group by u.id",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id, u.age from user u group by u.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id",
- "Query": "select u.id, u.age from `user` as u group by u.id",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Functions that return JSON value attributes
-"select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
- "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1",
- "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Json array functions
-"select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
- "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1",
- "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Json merge functions
-"select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
- "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1",
- "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# JSON modifier functions
-"select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')"
-{
- "QueryType": "SELECT",
- "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1",
- "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Reference with a subquery which can be merged
-"select exists(select id from user where id = 4)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select id from user where id = 4)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where id = 4 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select id from user where id = 4)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(4)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# Reference with a subquery which cannot be merged
-"select exists(select * from user)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` limit :__upper_limit",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# insert function not requiring any table
-"select insert('Quadratic', 3, 4, 'What')"
-{
- "QueryType": "SELECT",
- "Original": "select insert('Quadratic', 3, 4, 'What')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
- "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select insert('Quadratic', 3, 4, 'What')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1",
- "Query": "select insert('Quadratic', 3, 4, 'What') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# insert function using column names as arguments
-"select insert(tcol1, id, 3, tcol2) from user"
-{
- "QueryType": "SELECT",
- "Original": "select insert(tcol1, id, 3, tcol2) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
- "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select insert(tcol1, id, 3, tcol2) from user",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1",
- "Query": "select insert(tcol1, id, 3, tcol2) from `user`",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# gtid functions
-"select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')"
-{
- "QueryType": "SELECT",
- "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
- "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1",
- "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# Predicate in apply join which is merged
-"select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'"
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_extra_user_id": 1
- },
- "TableName": "`user`_user_extra_user_metadata",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1",
- "Query": "select user_extra.user_id from user_extra where user_extra.col = :user_col",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_metadata.user_id from user_metadata where 1 != 1",
- "Query": "select user_metadata.user_id from user_metadata where user_metadata.user_id = :user_extra_user_id",
- "Table": "user_metadata",
- "Values": [
- ":user_extra_user_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_col": 0
- },
- "TableName": "`user`_user_extra, user_metadata",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_metadata.user_id from user_extra, user_metadata where 1 != 1",
- "Query": "select user_metadata.user_id from user_extra, user_metadata where user_extra.col = :user_col and user_extra.user_id = user_metadata.user_id",
- "Table": "user_extra, user_metadata"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra",
- "user.user_metadata"
- ]
-}
-
-# Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates
-"SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456"
-{
- "QueryType": "SELECT",
- "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where 1 != 1",
- "Query": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where `user`.id = 123 and music.id = 456",
- "Table": "`user`, music_extra, music",
- "Values": [
- "INT64(123)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id from `user`, music_extra, music where 1 != 1",
- "Query": "select `user`.id from `user`, music_extra, music where music.id = 456 and `user`.id = 123 and `user`.id = music_extra.user_id and music_extra.user_id = music.user_id",
- "Table": "`user`, music, music_extra",
- "Values": [
- "INT64(123)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music",
- "user.music_extra",
- "user.user"
- ]
-}
-
-# SQL_CALC_FOUND_ROWS with vindex lookup
-"select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2"
-{
- "QueryType": "SELECT",
- "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where `name` = 'aa'",
- "Table": "`user`",
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2",
- "Instructions": {
- "OperatorType": "SQL_CALC_FOUND_ROWS",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(2)",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|2) ASC",
- "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit",
- "ResultColumns": 2,
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS count(*)",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "VARCHAR(\"aa\")"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from `user` where 1 != 1",
- "Query": "select count(*) from `user` where `name` = 'aa'",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# `None` route being merged with another route via join predicate on Vindex columns
-"SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1",
- "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id in (null) and `user`.id = 5",
- "Table": "music, `user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, `user` where 1 != 1",
- "Query": "select music.id from music, `user` where music.user_id in (null) and `user`.id = 5 and music.user_id = `user`.id",
- "Table": "`user`, music"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Treating single value tuples as `EqualUnique` routes
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(5))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes, with derived table
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select music.id from music where 1 != 1) as _inner where 1 != 1",
- "Query": "select * from (select music.id from music where music.user_id in ::__vals) as _inner",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.foo = 'bar'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "(INT64(3), INT64(4), INT64(5))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(3), INT64(4), INT64(5))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(1), INT64(2), INT64(3))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) or music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `IN` comparison on Vindex with `None` subquery, as routing predicate
-"SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `IN` comparison on Vindex with `None` subquery, as non-routing predicate
-"SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable scatter subquery
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.genre = 'pop'",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable scatter subquery with `GROUP BY` on unique vindex column
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1 group by music.id",
- "Query": "select music.id from music where music.genre = 'pop' group by music.id",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable scatter subquery with `GROUP BY` on-non vindex column
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)"
-"unsupported: in scatter query: group by column must reference column in SELECT list"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "random(0) AS id",
- "GroupBy": "(1|2)",
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)",
- "OrderBy": "(1|2) ASC",
- "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable scatter subquery with LIMIT
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit",
- "Table": "music"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with `MAX` aggregate and grouped by unique vindex
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id",
- "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable subquery with `MAX` aggregate
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0) AS max(music.id)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id in ::__vals",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with `LIMIT` due to `EqualUnique` route
-"SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select max(music.id) from music where 1 != 1",
- "Query": "select max(music.id) from music where music.user_id = 5 limit 10",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with multiple levels of derived statements
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select * from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1",
- "Query": "select * from (select * from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit)",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
- "Table": "music",
- "Values": [
- "(INT64(5))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in (5) limit 10) as subquery_for_limit) as subquery_for_limit)",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6))"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Unmergeable subquery with multiple levels of derived statements
-"SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music limit :__upper_limit",
- "Table": "music"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals",
- "Table": "music",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null))",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge
-"SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# `None` subquery nested inside `OR` expression - outer query keeps routing information
-"SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "None",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.user_id in (null)",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5",
- "Table": "music"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5",
- "Table": "music"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together
-"SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "music_id": 0
- },
- "TableName": "music_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music",
- "Table": "music"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
- "Query": "select 1 from (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = :music_id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
- "Query": "select music.id from music, (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = music.id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Joining with a subquery that uses an `EqualUnique` route can be merged
-"SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
- "Query": "select music.id from music join (select id from music where music.user_id = 5) as other on other.id = music.id",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
- "Query": "select music.id from music, (select id from music where music.user_id = 5) as other where other.id = music.id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Joining with a subquery that has an `IN` route can be merged
-"SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1",
- "Query": "select music.id from music join (select id from music where music.user_id in (5, 6, 7)) as other on other.id = music.id",
- "Table": "music"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1",
- "Query": "select music.id from music, (select id from music where music.user_id in ::__vals) as other where other.id = music.id",
- "Table": "music",
- "Values": [
- "(INT64(5), INT64(6), INT64(7))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# limit on the vtgate has to be executed on the LHS of a join
-"select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "ue_user_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra limit :__upper_limit",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where `user`.id = :ue_user_id",
- "Table": "`user`",
- "Values": [
- ":ue_user_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-"select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id"
-"unsupported: filtering on results of cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "t_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0,
- 1
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS b",
- "GroupBy": "(0|3), (2|4)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra where 1 != 1 group by id, weight_string(id), req, weight_string(req)",
- "OrderBy": "(0|3) ASC, (2|4) ASC",
- "Query": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra group by id, weight_string(id), req, weight_string(req) order by id asc, req asc",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.a from `user` where 1 != 1",
- "Query": "select `user`.a from `user` where `user`.id = :t_id",
- "Table": "`user`",
- "Values": [
- ":t_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# cant switch sides for outer joins
-"select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id"
-"unsupported: LEFT JOIN not supported for derived tables"
-Gen4 plan same as above
-
-# limit on both sides means that we can't evaluate this at all
-"select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id"
-"unsupported: filtering on results of cross-shard subquery"
-Gen4 error: unsupported: JOIN not supported between derived tables
-
-"SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id"
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "other_maxt": 0
- },
- "TableName": "music_music",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select other.maxt from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1",
- "Query": "select other.maxt from (select max(id) as maxt from music where music.user_id = 5) as other",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from music where 1 != 1",
- "Query": "select music.id from music where music.id = :other_maxt",
- "Table": "music",
- "Values": [
- ":other_maxt"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1",
- "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id",
- "Table": "music",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246
-"SELECT 1 as x, (SELECT x)"
-{
- "QueryType": "SELECT",
- "Original": "SELECT 1 as x, (SELECT x)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
- "Query": "select 1 as x, (select x from dual) from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT 1 as x, (SELECT x)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1",
- "Query": "select 1 as x, (select x from dual) from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json
new file mode 100644
index 00000000000..5817157752b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json
@@ -0,0 +1,69 @@
+[
+ {
+ "comment": "EXISTS subquery when the default ks is different than the inner query",
+ "query": "select exists(select * from user where id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where id = 5 limit 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "second_user",
+ "Sharded": true
+ },
+ "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
+ "Query": "select :__sq_has_values1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "second_user.dual",
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.txt b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.txt
deleted file mode 100644
index 347c07ad4c9..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-# EXISTS subquery when the default ks is different than the inner query
-"select exists(select * from user where id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where id = 5 limit 1",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "second_user",
- "Sharded": true
- },
- "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1",
- "Query": "select :__sq_has_values1 from dual",
- "Table": "dual"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "second_user.dual",
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json
new file mode 100644
index 00000000000..822ed6c2307
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json
@@ -0,0 +1,48 @@
+[
+ {
+ "comment": "EXISTS subquery",
+ "query": "select exists(select * from user where id = 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select exists(select * from user where id = 5)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
+ "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
+ "Table": "dual",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.dual",
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.txt b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.txt
deleted file mode 100644
index 66afdf93a63..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-# EXISTS subquery
-"select exists(select * from user where id = 5)"
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select exists(select * from user where id = 5)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1",
- "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual",
- "Table": "dual",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.dual",
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.json b/go/vt/vtgate/planbuilder/testdata/set_cases.json
new file mode 100644
index 00000000000..5dee2e63690
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/set_cases.json
@@ -0,0 +1,569 @@
+[
+ {
+ "comment": "set single user defined variable",
+ "query": "set @foo = 42",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = 42",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "INT64(42)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set multi user defined variable",
+ "query": "set @foo = 42, @bar = @foo",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = 42, @bar = @foo",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "INT64(42)"
+ },
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "bar",
+ "Expr": ":__vtudvfoo"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set multi user defined variable with complex expression",
+ "query": "set @foo = 42, @bar = @foo + 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = 42, @bar = @foo + 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "INT64(42)"
+ },
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "bar",
+ "Expr": ":__vtudvfoo + INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set UDV to expression that can't be evaluated at vtgate",
+ "query": "set @foo = CONCAT('Any','Expression','Is','Valid')",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @foo = CONCAT('Any','Expression','Is','Valid')",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "UserDefinedVariable",
+ "Name": "foo",
+ "Expr": "[COLUMN 0]"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "select CONCAT('Any', 'Expression', 'Is', 'Valid') from dual",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "single sysvar cases",
+ "query": "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "multiple sysvar cases",
+ "query": "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO')",
+ "SupportSetVar": true
+ },
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_safe_updates",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "0",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "autocommit case",
+ "query": "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": ":__vtudvmyudv"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(0)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set ignore plan",
+ "query": "set @@default_storage_engine = 'DONOTCHANGEME'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@default_storage_engine = 'DONOTCHANGEME'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "default_storage_engine",
+ "Expr": "'DONOTCHANGEME'"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set check and ignore plan",
+ "query": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set system settings",
+ "query": "set @@sql_safe_updates = 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@sql_safe_updates = 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarSet",
+ "Name": "sql_safe_updates",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "Expr": "1",
+ "SupportSetVar": true
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set plan building with ON/OFF enum",
+ "query": "set @@innodb_strict_mode = OFF",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@innodb_strict_mode = OFF",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "innodb_strict_mode",
+ "Expr": "0"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set plan building with string literal",
+ "query": "set @@innodb_strict_mode = 'OFF'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@innodb_strict_mode = 'OFF'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "innodb_strict_mode",
+ "Expr": "0"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set plan building with string literal",
+ "query": "set @@innodb_tmpdir = 'OFF'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@innodb_tmpdir = 'OFF'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarIgnore",
+ "Name": "innodb_tmpdir",
+ "Expr": "'OFF'"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set system settings",
+ "query": "set @@ndbinfo_max_bytes = 192",
+ "plan": "ndbinfo_max_bytes: system setting is not supported"
+ },
+ {
+ "comment": "set autocommit",
+ "query": "set autocommit = 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set autocommit = 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set autocommit false",
+ "query": "set autocommit = 0",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set autocommit = 0",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(0)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set autocommit with backticks",
+ "query": "set @@session.`autocommit` = 0",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@session.`autocommit` = 0",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(0)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "more vitess aware settings",
+ "query": "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "client_found_rows",
+ "Expr": "INT64(0)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "skip_query_plan_cache",
+ "Expr": "INT64(1)"
+ },
+ {
+ "Type": "SysVarAware",
+ "Name": "sql_select_limit",
+ "Expr": "INT64(20)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set autocommit to default",
+ "query": "set @@autocommit = default",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@autocommit = default",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarAware",
+ "Name": "autocommit",
+ "Expr": "INT64(1)"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "set global autocommit to default",
+ "query": "set global autocommit = off",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set global autocommit = off",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarCheckAndIgnore",
+ "Name": "autocommit",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": {},
+ "Expr": "0"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "change read only variable",
+ "query": "set socket = ''",
+ "plan": "variable 'socket' is a read only variable"
+ },
+ {
+ "comment": "set transaction",
+ "query": "set transaction read only",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set transaction read only",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "set vitess_metadata",
+ "query": "set @@vitess_metadata.app_v1= '1'",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@vitess_metadata.app_v1= '1'",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Name": "app_v1",
+ "Value": "1"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.txt b/go/vt/vtgate/planbuilder/testdata/set_cases.txt
deleted file mode 100644
index 4c5e77ff5b5..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/set_cases.txt
+++ /dev/null
@@ -1,566 +0,0 @@
-# set single user defined variable
-"set @foo = 42"
-{
- "QueryType": "SET",
- "Original": "set @foo = 42",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "INT64(42)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set multi user defined variable
-"set @foo = 42, @bar = @foo"
-{
- "QueryType": "SET",
- "Original": "set @foo = 42, @bar = @foo",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "INT64(42)"
- },
- {
- "Type": "UserDefinedVariable",
- "Name": "bar",
- "Expr": ":__vtudvfoo"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set multi user defined variable with complex expression
-"set @foo = 42, @bar = @foo + 1"
-{
- "QueryType": "SET",
- "Original": "set @foo = 42, @bar = @foo + 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "INT64(42)"
- },
- {
- "Type": "UserDefinedVariable",
- "Name": "bar",
- "Expr": ":__vtudvfoo + INT64(1)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set UDV to expression that can't be evaluated at vtgate
-"set @foo = CONCAT('Any','Expression','Is','Valid')"
-{
- "QueryType": "SET",
- "Original": "set @foo = CONCAT('Any','Expression','Is','Valid')",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "UserDefinedVariable",
- "Name": "foo",
- "Expr": "[COLUMN 0]"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "select CONCAT('Any', 'Expression', 'Is', 'Valid') from dual",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# single sysvar cases
-"SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'"
-{
- "QueryType": "SET",
- "Original": "SET sql_mode = 'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "'STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO'",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# multiple sysvar cases
-"SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0"
-{
- "QueryType": "SET",
- "Original": "SET @@SESSION.sql_mode = CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO'), @@SESSION.sql_safe_updates = 0",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "CONCAT(CONCAT(@@sql_mode, ',STRICT_ALL_TABLES'), ',NO_AUTO_VALUE_ON_ZERO')",
- "SupportSetVar": true
- },
- {
- "Type": "SysVarSet",
- "Name": "sql_safe_updates",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "0",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# autocommit case
-"SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`"
-{
- "QueryType": "SET",
- "Original": "SET autocommit = 1, autocommit = on, autocommit = 'on', autocommit = @myudv, autocommit = `on`, autocommit = `off`",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": ":__vtudvmyudv"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(0)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set ignore plan
-"set @@default_storage_engine = 'DONOTCHANGEME'"
-{
- "QueryType": "SET",
- "Original": "set @@default_storage_engine = 'DONOTCHANGEME'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "default_storage_engine",
- "Expr": "'DONOTCHANGEME'"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set check and ignore plan
-"set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')"
-{
- "QueryType": "SET",
- "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set system settings
-"set @@sql_safe_updates = 1"
-{
- "QueryType": "SET",
- "Original": "set @@sql_safe_updates = 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarSet",
- "Name": "sql_safe_updates",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "Expr": "1",
- "SupportSetVar": true
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set plan building with ON/OFF enum
-"set @@innodb_strict_mode = OFF"
-{
- "QueryType": "SET",
- "Original": "set @@innodb_strict_mode = OFF",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "innodb_strict_mode",
- "Expr": "0"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set plan building with string literal
-"set @@innodb_strict_mode = 'OFF'"
-{
- "QueryType": "SET",
- "Original": "set @@innodb_strict_mode = 'OFF'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "innodb_strict_mode",
- "Expr": "0"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set plan building with string literal
-"set @@innodb_tmpdir = 'OFF'"
-{
- "QueryType": "SET",
- "Original": "set @@innodb_tmpdir = 'OFF'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarIgnore",
- "Name": "innodb_tmpdir",
- "Expr": "'OFF'"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set system settings
-"set @@ndbinfo_max_bytes = 192"
-"ndbinfo_max_bytes: system setting is not supported"
-Gen4 plan same as above
-
-# set autocommit
-"set autocommit = 1"
-{
- "QueryType": "SET",
- "Original": "set autocommit = 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set autocommit false
-"set autocommit = 0"
-{
- "QueryType": "SET",
- "Original": "set autocommit = 0",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(0)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set autocommit with backticks
-"set @@session.`autocommit` = 0"
-{
- "QueryType": "SET",
- "Original": "set @@session.`autocommit` = 0",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(0)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# more vitess aware settings
-"set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20"
-{
- "QueryType": "SET",
- "Original": "set client_found_rows = off, skip_query_plan_cache = ON, sql_select_limit=20",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "client_found_rows",
- "Expr": "INT64(0)"
- },
- {
- "Type": "SysVarAware",
- "Name": "skip_query_plan_cache",
- "Expr": "INT64(1)"
- },
- {
- "Type": "SysVarAware",
- "Name": "sql_select_limit",
- "Expr": "INT64(20)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set autocommit to default
-"set @@autocommit = default"
-{
- "QueryType": "SET",
- "Original": "set @@autocommit = default",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarAware",
- "Name": "autocommit",
- "Expr": "INT64(1)"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# set global autocommit to default
-"set global autocommit = off"
-{
- "QueryType": "SET",
- "Original": "set global autocommit = off",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarCheckAndIgnore",
- "Name": "autocommit",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": {},
- "Expr": "0"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# change read only variable
-"set socket = ''"
-"variable 'socket' is a read only variable"
-Gen4 plan same as above
-
-# set transaction
-"set transaction read only"
-{
- "QueryType": "SET",
- "Original": "set transaction read only",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# set vitess_metadata
-"set @@vitess_metadata.app_v1= '1'"
-{
- "QueryType": "SET",
- "Original": "set @@vitess_metadata.app_v1= '1'",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Name": "app_v1",
- "Value": "1"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.json b/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.json
new file mode 100644
index 00000000000..dea7b35ff3e
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.json
@@ -0,0 +1,40 @@
+[
+ {
+ "comment": "set passthrough disabled - check and ignore plan",
+ "query": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1",
+ "plan": {
+ "QueryType": "SET",
+ "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1",
+ "Instructions": {
+ "OperatorType": "Set",
+ "Ops": [
+ {
+ "Type": "SysVarCheckAndIgnore",
+ "Name": "sql_mode",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": {},
+ "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')"
+ },
+ {
+ "Type": "SysVarCheckAndIgnore",
+ "Name": "sql_safe_updates",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": {},
+ "Expr": "1"
+ }
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SingleRow"
+ }
+ ]
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.txt b/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.txt
deleted file mode 100644
index 8a561a5ea59..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/set_sysvar_disabled_cases.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-# set passthrough disabled - check and ignore plan
-"set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1"
-{
- "QueryType": "SET",
- "Original": "set @@sql_mode = concat(@@sql_mode, ',NO_AUTO_CREATE_USER'), @@sql_safe_updates = 1",
- "Instructions": {
- "OperatorType": "Set",
- "Ops": [
- {
- "Type": "SysVarCheckAndIgnore",
- "Name": "sql_mode",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": {},
- "Expr": "concat(@@sql_mode, ',NO_AUTO_CREATE_USER')"
- },
- {
- "Type": "SysVarCheckAndIgnore",
- "Name": "sql_safe_updates",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": {},
- "Expr": "1"
- }
- ],
- "Inputs": [
- {
- "OperatorType": "SingleRow"
- }
- ]
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.json b/go/vt/vtgate/planbuilder/testdata/show_cases.json
new file mode 100644
index 00000000000..f0db84540c3
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/show_cases.json
@@ -0,0 +1,774 @@
+[
+ {
+ "comment": "Show table status without database name or conditions.",
+ "query": "SHOW table StatUs",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a keyspace name",
+ "query": "SHOW table StatUs from main",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs from main",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a keyspace name using IN",
+ "query": "SHOW table StatUs In main",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs In main",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a keyspace name with a condition",
+ "query": "SHOW table StatUs In user WHERE `Rows` > 70",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs In user WHERE `Rows` > 70",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status where `Rows` > 70",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "Show Table status with a Like condition",
+ "query": "SHOW table StatUs LIKe '%a'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "SHOW table StatUs LIKe '%a'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show table status like '%a'",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns from user keyspace",
+ "query": "show full columns from user.user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from user.user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns from main keyspace",
+ "query": "show full columns from unsharded",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from unsharded",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from unsharded",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns pass as dbname in from clause supersedes the qualifier",
+ "query": "show full columns from user.unsharded from main",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from user.unsharded from main",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from unsharded",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns fails as table does not exists in user keyspace",
+ "query": "show full columns from unsharded from user",
+ "plan": "table unsharded not found"
+ },
+ {
+ "comment": "show columns fails as table does not exists in user keyspace",
+ "query": "show full columns from user.unsharded",
+ "plan": "table unsharded not found"
+ },
+ {
+ "comment": "show charset",
+ "query": "show charset",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show charset",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show function",
+ "query": "show function status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show function status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show function status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show privileges",
+ "query": "show privileges",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show privileges",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show privileges",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show procedure status",
+ "query": "show procedure status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show procedure status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show procedure status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show variables",
+ "query": "show variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show variables",
+ "Instructions": {
+ "OperatorType": "ReplaceVariables",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show variables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show global variables",
+ "query": "show global variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global variables",
+ "Instructions": {
+ "OperatorType": "ReplaceVariables",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show global variables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show databases",
+ "query": "show databases",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show databases",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show create database",
+ "query": "show create database user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create database user",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create database `user`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create database system_schema",
+ "query": "show create database mysql",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create database mysql",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create database mysql",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create procedure",
+ "query": "show create procedure proc",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create procedure proc",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create procedure proc",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create procedure from system_schema",
+ "query": "show create procedure information_schema.proc",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create procedure information_schema.proc",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create procedure information_schema.proc",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table on table present in sharded but as unsharded is selected it goes to unsharded keyspace",
+ "query": "show create table user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table with qualifier",
+ "query": "show create table user.user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table user.user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table with unsharded as default keyspace",
+ "query": "show create table unknown",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table unknown",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table unknown",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show create table with table not present with qualifier",
+ "query": "show create table user.unknown",
+ "plan": "table unknown not found"
+ },
+ {
+ "comment": "show create table from system_schema",
+ "query": "show create table information_schema.tables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show create table information_schema.tables",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show create table information_schema.`tables`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show tables",
+ "query": "show tables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show tables",
+ "Instructions": {
+ "OperatorType": "RenameFields",
+ "Columns": [
+ "Tables_in_main"
+ ],
+ "Indices": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show tables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show tables from db",
+ "query": "show tables from user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show tables from user",
+ "Instructions": {
+ "OperatorType": "RenameFields",
+ "Columns": [
+ "Tables_in_user"
+ ],
+ "Indices": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show tables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show tables from system schema",
+ "query": "show tables from performance_schema",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show tables from performance_schema",
+ "Instructions": {
+ "OperatorType": "RenameFields",
+ "Columns": [
+ "Tables_in_performance_schema"
+ ],
+ "Indices": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show tables from performance_schema",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show migrations with db and like",
+ "query": "show vitess_migrations from user like '%format'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_migrations from user like '%format'",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'"
+ }
+ }
+ },
+ {
+ "comment": "show migrations with db and where",
+ "query": "show vitess_migrations from user where id = 5",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_migrations from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "SELECT * FROM _vt.schema_migrations where id = 5"
+ }
+ }
+ },
+ {
+ "comment": "show vgtid",
+ "query": "show global vgtid_executed",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global vgtid_executed",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "vgtid(1) AS global vgtid_executed",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "select 'main' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
+ "ShardNameNeeded": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show gtid",
+ "query": "show global gtid_executed from user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global gtid_executed from user",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AllShards()",
+ "Query": "select 'user' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
+ "ShardNameNeeded": true
+ }
+ }
+ },
+ {
+ "comment": "show warnings",
+ "query": "show warnings",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show warnings",
+ "Instructions": {
+ "OperatorType": "SHOW WARNINGS"
+ }
+ }
+ },
+ {
+ "comment": "show global status",
+ "query": "show global status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show global status",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show plugins",
+ "query": "show plugins",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show plugins",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show engines",
+ "query": "show engines",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show engines",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_shards",
+ "query": "show vitess_shards",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_shards",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_shards"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_tablets",
+ "query": "show vitess_tablets",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_tablets",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_tablets"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_tablets with filter",
+ "query": "show vitess_tablets like '-2%'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_tablets like '-2%'",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_tablets",
+ "Filter": " like '-2%'"
+ }
+ }
+ },
+ {
+ "comment": "show vschema tables",
+ "query": "show vschema tables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vschema tables",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show vschema vindexes",
+ "query": "show vschema vindexes",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vschema vindexes",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show vschema vindexes on a table",
+ "query": "show vschema vindexes on user.user",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vschema vindexes on user.user",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show vitess target",
+ "query": "show vitess_target",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_target",
+ "Instructions": {
+ "OperatorType": "Rows"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_replication_status",
+ "query": "show vitess_replication_status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_replication_status",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_replication_status"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_replication_status with filter",
+ "query": "show vitess_replication_status like 'x'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_replication_status like 'x'",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_replication_status",
+ "Filter": " like 'x'"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_metadata variables",
+ "query": "show vitess_metadata variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_metadata variables",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_metadata variables"
+ }
+ }
+ },
+ {
+ "comment": "show vitess_metadata variables with filter",
+ "query": "show vitess_metadata variables like 'x'",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show vitess_metadata variables like 'x'",
+ "Instructions": {
+ "OperatorType": "ShowExec",
+ "Variant": " vitess_metadata variables",
+ "Filter": " like 'x'"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.txt b/go/vt/vtgate/planbuilder/testdata/show_cases.txt
deleted file mode 100644
index 3dc88f8ae49..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/show_cases.txt
+++ /dev/null
@@ -1,771 +0,0 @@
-# Show table status without database name or conditions.
-"SHOW table StatUs"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a keyspace name
-"SHOW table StatUs from main"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs from main",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a keyspace name using IN
-"SHOW table StatUs In main"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs In main",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a keyspace name with a condition
-"SHOW table StatUs In user WHERE `Rows` > 70"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs In user WHERE `Rows` \u003e 70",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status where `Rows` \u003e 70",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# Show Table status with a Like condition
-"SHOW table StatUs LIKe '%a'"
-{
- "QueryType": "SHOW",
- "Original": "SHOW table StatUs LIKe '%a'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show table status like '%a'",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns from user keyspace
-"show full columns from user.user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from user.user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns from main keyspace
-"show full columns from unsharded"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from unsharded",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from unsharded",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns pass as dbname in from clause supersedes the qualifier
-"show full columns from user.unsharded from main"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from user.unsharded from main",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from unsharded",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns fails as table does not exists in user keyspace
-"show full columns from unsharded from user"
-"table unsharded not found"
-Gen4 plan same as above
-
-# show columns fails as table does not exists in user keyspace
-"show full columns from user.unsharded"
-"table unsharded not found"
-Gen4 plan same as above
-
-# show charset
-"show charset"
-{
- "QueryType": "SHOW",
- "Original": "show charset",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show function
-"show function status"
-{
- "QueryType": "SHOW",
- "Original": "show function status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show function status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show privileges
-"show privileges"
-{
- "QueryType": "SHOW",
- "Original": "show privileges",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show privileges",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show procedure status
-"show procedure status"
-{
- "QueryType": "SHOW",
- "Original": "show procedure status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show procedure status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show variables
-"show variables"
-{
- "QueryType": "SHOW",
- "Original": "show variables",
- "Instructions": {
- "OperatorType": "ReplaceVariables",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show variables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show global variables
-"show global variables"
-{
- "QueryType": "SHOW",
- "Original": "show global variables",
- "Instructions": {
- "OperatorType": "ReplaceVariables",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show global variables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show databases
-"show databases"
-{
- "QueryType": "SHOW",
- "Original": "show databases",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show create database
-"show create database user"
-{
- "QueryType": "SHOW",
- "Original": "show create database user",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create database `user`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create database system_schema
-"show create database mysql"
-{
- "QueryType": "SHOW",
- "Original": "show create database mysql",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create database mysql",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create procedure
-"show create procedure proc"
-{
- "QueryType": "SHOW",
- "Original": "show create procedure proc",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create procedure proc",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create procedure from system_schema
-"show create procedure information_schema.proc"
-{
- "QueryType": "SHOW",
- "Original": "show create procedure information_schema.proc",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create procedure information_schema.proc",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table on table present in sharded but as unsharded is selected it goes to unsharded keyspace
-"show create table user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show create table user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table with qualifier
-"show create table user.user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show create table user.user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table with unsharded as default keyspace
-"show create table unknown"
-{
- "QueryType": "SHOW",
- "Original": "show create table unknown",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table unknown",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show create table with table not present with qualifier
-"show create table user.unknown"
-"table unknown not found"
-Gen4 plan same as above
-
-# show create table from system_schema
-"show create table information_schema.tables"
-{
- "QueryType": "SHOW",
- "Original": "show create table information_schema.tables",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show create table information_schema.`tables`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show tables
-"show tables"
-{
- "QueryType": "SHOW",
- "Original": "show tables",
- "Instructions": {
- "OperatorType": "RenameFields",
- "Columns": [
- "Tables_in_main"
- ],
- "Indices": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show tables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show tables from db
-"show tables from user"
-{
- "QueryType": "SHOW",
- "Original": "show tables from user",
- "Instructions": {
- "OperatorType": "RenameFields",
- "Columns": [
- "Tables_in_user"
- ],
- "Indices": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show tables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show tables from system schema
-"show tables from performance_schema"
-{
- "QueryType": "SHOW",
- "Original": "show tables from performance_schema",
- "Instructions": {
- "OperatorType": "RenameFields",
- "Columns": [
- "Tables_in_performance_schema"
- ],
- "Indices": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show tables from performance_schema",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show migrations with db and like
-"show vitess_migrations from user like '%format'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_migrations from user like '%format'",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'"
- }
-}
-Gen4 plan same as above
-
-# show migrations with db and where
-"show vitess_migrations from user where id = 5"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_migrations from user where id = 5",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "SELECT * FROM _vt.schema_migrations where id = 5"
- }
-}
-Gen4 plan same as above
-
-# show vgtid
-"show global vgtid_executed"
-{
- "QueryType": "SHOW",
- "Original": "show global vgtid_executed",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "vgtid(1) AS global vgtid_executed",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AllShards()",
- "Query": "select 'main' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
- "ShardNameNeeded": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show gtid
-"show global gtid_executed from user"
-{
- "QueryType": "SHOW",
- "Original": "show global gtid_executed from user",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AllShards()",
- "Query": "select 'user' as db_name, @@global.gtid_executed as gtid_executed, :__vt_shard as shard",
- "ShardNameNeeded": true
- }
-}
-Gen4 plan same as above
-
-# show warnings
-"show warnings"
-{
- "QueryType": "SHOW",
- "Original": "show warnings",
- "Instructions": {
- "OperatorType": "SHOW WARNINGS"
- }
-}
-Gen4 plan same as above
-
-# show global status
-"show global status"
-{
- "QueryType": "SHOW",
- "Original": "show global status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show global status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show plugins
-"show plugins"
-{
- "QueryType": "SHOW",
- "Original": "show plugins",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show engines
-"show engines"
-{
- "QueryType": "SHOW",
- "Original": "show engines",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vitess_shards
-"show vitess_shards"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_shards",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_shards"
- }
-}
-Gen4 plan same as above
-
-# show vitess_tablets
-"show vitess_tablets"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_tablets",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_tablets"
- }
-}
-Gen4 plan same as above
-
-# show vitess_tablets with filter
-"show vitess_tablets like '-2%'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_tablets like '-2%'",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_tablets",
- "Filter": " like '-2%'"
- }
-}
-Gen4 plan same as above
-
-# show vschema tables
-"show vschema tables"
-{
- "QueryType": "SHOW",
- "Original": "show vschema tables",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vschema vindexes
-"show vschema vindexes"
-{
- "QueryType": "SHOW",
- "Original": "show vschema vindexes",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vschema vindexes on a table
-"show vschema vindexes on user.user"
-{
- "QueryType": "SHOW",
- "Original": "show vschema vindexes on user.user",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vitess target
-"show vitess_target"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_target",
- "Instructions": {
- "OperatorType": "Rows"
- }
-}
-Gen4 plan same as above
-
-# show vitess_replication_status
-"show vitess_replication_status"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_replication_status",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_replication_status"
- }
-}
-Gen4 plan same as above
-
-# show vitess_replication_status with filter
-"show vitess_replication_status like 'x'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_replication_status like 'x'",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_replication_status",
- "Filter": " like 'x'"
- }
-}
-Gen4 plan same as above
-
-# show vitess_metadata variables
-"show vitess_metadata variables"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_metadata variables",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_metadata variables"
- }
-}
-Gen4 plan same as above
-
-# show vitess_metadata variables with filter
-"show vitess_metadata variables like 'x'"
-{
- "QueryType": "SHOW",
- "Original": "show vitess_metadata variables like 'x'",
- "Instructions": {
- "OperatorType": "ShowExec",
- "Variant": " vitess_metadata variables",
- "Filter": " like 'x'"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.json
new file mode 100644
index 00000000000..47ad51ea354
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.json
@@ -0,0 +1,115 @@
+[
+ {
+ "comment": "show columns from user keyspace",
+ "query": "show full columns from user_extra",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from user_extra",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from user_extra",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show columns from routed table",
+ "query": "show full fields from `route1`",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full fields from `route1`",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from `user`",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show variables",
+ "query": "show variables",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show variables",
+ "Instructions": {
+ "OperatorType": "ReplaceVariables",
+ "Inputs": [
+ {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show variables",
+ "SingleShardOnly": true
+ }
+ ]
+ }
+ }
+ },
+ {
+ "comment": "show full columns from system schema",
+ "query": "show full columns from sys.sys_config",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from sys.sys_config",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from sys.sys_config",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show full columns from system schema replacing qualifier",
+ "query": "show full columns from x.sys_config from sys",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show full columns from x.sys_config from sys",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show full columns from sys.sys_config",
+ "SingleShardOnly": true
+ }
+ }
+ },
+ {
+ "comment": "show global status",
+ "query": "show global status",
+ "plan": {
+ "QueryType": "SHOW",
+ "Original": "show global status",
+ "Instructions": {
+ "OperatorType": "Send",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "TargetDestination": "AnyShard()",
+ "Query": "show global status",
+ "SingleShardOnly": true
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt b/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt
deleted file mode 100644
index 8bb2addd61d..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/show_cases_no_default_keyspace.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-# show columns from user keyspace
-"show full columns from user_extra"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from user_extra",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from user_extra",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show columns from routed table
-"show full fields from `route1`"
-{
- "QueryType": "SHOW",
- "Original": "show full fields from `route1`",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from `user`",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show variables
-"show variables"
-{
- "QueryType": "SHOW",
- "Original": "show variables",
- "Instructions": {
- "OperatorType": "ReplaceVariables",
- "Inputs": [
- {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show variables",
- "SingleShardOnly": true
- }
- ]
- }
-}
-Gen4 plan same as above
-
-# show full columns from system schema
-"show full columns from sys.sys_config"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from sys.sys_config",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from sys.sys_config",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show full columns from system schema replacing qualifier
-"show full columns from x.sys_config from sys"
-{
- "QueryType": "SHOW",
- "Original": "show full columns from x.sys_config from sys",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show full columns from sys.sys_config",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
-
-# show global status
-"show global status"
-{
- "QueryType": "SHOW",
- "Original": "show global status",
- "Instructions": {
- "OperatorType": "Send",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "TargetDestination": "AnyShard()",
- "Query": "show global status",
- "SingleShardOnly": true
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/stream_cases.json b/go/vt/vtgate/planbuilder/testdata/stream_cases.json
new file mode 100644
index 00000000000..c246d59b47f
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/stream_cases.json
@@ -0,0 +1,19 @@
+[
+ {
+ "comment": "stream table",
+ "query": "stream * from music",
+ "plan": {
+ "QueryType": "STREAM",
+ "Original": "stream * from music",
+ "Instructions": {
+ "OperatorType": "MStream",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "TargetDestination": "ExactKeyRange(-)",
+ "Table": "music"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/stream_cases.txt b/go/vt/vtgate/planbuilder/testdata/stream_cases.txt
deleted file mode 100644
index 2d2f1041af4..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/stream_cases.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-#stream table
-"stream * from music"
-{
- "QueryType": "STREAM",
- "Original": "stream * from music",
- "Instructions": {
- "OperatorType": "MStream",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "TargetDestination": "ExactKeyRange(-)",
- "Table": "music"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.json b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json
new file mode 100644
index 00000000000..f15388ea34c
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json
@@ -0,0 +1,90 @@
+[
+ {
+ "comment": "Tests in this file are for testing symtab functionality\n#\n# Column names need not be qualified if they are predefined in vschema and unambiguous.",
+ "query": "select predef2, predef3 from user join unsharded on predef2 = predef3",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "predef2": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select predef2 from `user` where 1 != 1",
+ "Query": "select predef2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select predef3 from unsharded where 1 != 1",
+ "Query": "select predef3 from unsharded where predef3 = :predef2",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "predef2": 0
+ },
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select predef2 from `user` where 1 != 1",
+ "Query": "select predef2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select predef3 from unsharded where 1 != 1",
+ "Query": "select predef3 from unsharded where predef3 = :predef2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "predef1 is in both user and unsharded. So, it's ambiguous.",
+ "query": "select predef1, predef3 from user join unsharded on predef1 = predef3",
+ "v3-plan": "symbol predef1 not found",
+ "gen4-plan": "Column 'predef1' in field list is ambiguous"
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt b/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt
deleted file mode 100644
index ed273ba6bd8..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/symtab_cases.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-# Tests in this file are for testing symtab functionality
-#
-# Column names need not be qualified if they are predefined in vschema and unambiguous.
-"select predef2, predef3 from user join unsharded on predef2 = predef3"
-{
- "QueryType": "SELECT",
- "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "predef2": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select predef2 from `user` where 1 != 1",
- "Query": "select predef2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select predef3 from unsharded where 1 != 1",
- "Query": "select predef3 from unsharded where predef3 = :predef2",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "predef2": 0
- },
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select predef2 from `user` where 1 != 1",
- "Query": "select predef2 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select predef3 from unsharded where 1 != 1",
- "Query": "select predef3 from unsharded where predef3 = :predef2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-
-# predef1 is in both user and unsharded. So, it's ambiguous.
-"select predef1, predef3 from user join unsharded on predef1 = predef3"
-"symbol predef1 not found"
-Gen4 error: Column 'predef1' in field list is ambiguous
diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.json b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json
new file mode 100644
index 00000000000..275a6720e29
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json
@@ -0,0 +1,158 @@
+[
+ {
+ "comment": "max_allowed_packet",
+ "query": "select @@max_allowed_packet from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@max_allowed_packet from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
+ "Query": "select @@max_allowed_packet from dual",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select @@max_allowed_packet from dual",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
+ "Query": "select @@max_allowed_packet from dual",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "unqualified table name",
+ "query": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where 1 != 1",
+ "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname order by t.table_schema asc, t.table_name asc, c.column_name asc",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`, information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where 1 != 1",
+ "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`columns`, information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.columns",
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "system schema query as a subquery",
+ "query": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
+ "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "dual"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
+ "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "dual"
+ },
+ "TablesUsed": [
+ "information_schema.schemata",
+ "main.dual"
+ ]
+ }
+ },
+ {
+ "comment": "system schema query as a derived table",
+ "query": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
+ "Query": "select * from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "information_schema.schemata"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.`1` from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
+ "Query": "select x.`1` from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
+ "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
+ "Table": "information_schema.schemata"
+ },
+ "TablesUsed": [
+ "information_schema.schemata"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt b/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt
deleted file mode 100644
index eab99ec3245..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/sysschema_default.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-# max_allowed_packet
-"select @@max_allowed_packet from dual"
-{
- "QueryType": "SELECT",
- "Original": "select @@max_allowed_packet from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
- "Query": "select @@max_allowed_packet from dual",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select @@max_allowed_packet from dual",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1",
- "Query": "select @@max_allowed_packet from dual",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# unqualified table name
-"select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name"
-{
- "QueryType": "SELECT",
- "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where 1 != 1",
- "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname order by t.table_schema asc, t.table_name asc, c.column_name asc",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`, information_schema.`columns`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where 1 != 1",
- "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`columns`, information_schema.`tables`"
- }
-}
-
-# system schema query as a subquery
-"SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);"
-{
- "QueryType": "SELECT",
- "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
- "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "dual"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1",
- "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "dual"
- },
- "TablesUsed": [
- "main.dual"
- ]
-}
-
-# system schema query as a derived table
-"SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
- "Query": "select * from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "information_schema.schemata"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.`1` from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1",
- "Query": "select x.`1` from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x",
- "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]",
- "Table": "information_schema.schemata"
- }
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/systemtables_cases.txt b/go/vt/vtgate/planbuilder/testdata/systemtables_cases.txt
deleted file mode 100644
index 094cc96e8f0..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/systemtables_cases.txt
+++ /dev/null
@@ -1,1456 +0,0 @@
-# Single information_schema query
-"select col from information_schema.foo"
-{
- "QueryType": "SELECT",
- "Original": "select col from information_schema.foo",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from information_schema.foo where 1 != 1",
- "Query": "select col from information_schema.foo",
- "Table": "information_schema.foo"
- }
-}
-Gen4 plan same as above
-
-# ',' join information_schema
-"select a.id,b.id from information_schema.a as a, information_schema.b as b"
-{
- "QueryType": "SELECT",
- "Original": "select a.id,b.id from information_schema.a as a, information_schema.b as b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.id, b.id from information_schema.a as a, information_schema.b as b where 1 != 1",
- "Query": "select a.id, b.id from information_schema.a as a, information_schema.b as b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-Gen4 plan same as above
-
-# information schema query that uses table_schema
-"select column_name from information_schema.columns where table_schema = (select schema())"
-{
- "QueryType": "SELECT",
- "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
- "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
- "Table": "information_schema.`columns`"
- }
-}
-Gen4 plan same as above
-
-# information schema join
-"select * from information_schema.a join information_schema.b"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a join information_schema.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a join information_schema.b where 1 != 1",
- "Query": "select * from information_schema.a join information_schema.b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a join information_schema.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a, information_schema.b where 1 != 1",
- "Query": "select * from information_schema.a, information_schema.b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-
-# access to unqualified column names in information_schema
-"select * from information_schema.a where b=10"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where b=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where b = 10",
- "Table": "information_schema.a"
- }
-}
-Gen4 plan same as above
-
-# access to qualified column names in information_schema
-"select * from information_schema.a where information_schema.a.b=10"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where information_schema.a.b=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where information_schema.a.b = 10",
- "Table": "information_schema.a"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where information_schema.a.b=10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where a.b = 10",
- "Table": "information_schema.a"
- }
-}
-
-# union of information_schema
-"select * from information_schema.a union select * from information_schema.b"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a union select * from information_schema.b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1 union select * from information_schema.b where 1 != 1",
- "Query": "select * from information_schema.a union select * from information_schema.b",
- "Table": "information_schema.a"
- }
-}
-Gen4 plan same as above
-
-# union between information_schema tables that should not be merged
-"select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"main\")]",
- "Table": "information_schema.`tables`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select distinct * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
- "Query": "select distinct * from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"main\")]",
- "Table": "information_schema.`tables`"
- }
- ]
- }
- ]
- }
-}
-
-# Select from information schema query with two tables that route should be merged
-"SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME"
-{
- "QueryType": "SELECT",
- "Original": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
- "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1",
- "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- }
-}
-
-# Select from information schema query with three tables such that route for 2 should be merged but not for the last.
-"SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME"
-{
- "QueryType": "SELECT",
- "Original": "SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.K",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
- "Query": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where 1 != 1",
- "Query": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME",
- "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.K"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select KCU.DELETE_RULE, S.UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.K as S where 1 != 1",
- "Query": "select KCU.DELETE_RULE, S.UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.K as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
- "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]",
- "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
- "Table": "INFORMATION_SCHEMA.K, INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
- }
-}
-
-#information_schema.routines
-"SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
- "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
- "SysTableTableSchema": "[:v1]",
- "Table": "information_schema.routines"
- }
-}
-Gen4 plan same as above
-
-#information_schema table sizes
-"SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?"
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
- "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[:v1]",
- "Table": "information_schema.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-#information_schema referential contraints
-"SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position"
-{
- "QueryType": "SELECT",
- "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1",
- "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc",
- "SysTableTableSchema": "[:v1, :v2]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where 1 != 1",
- "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null and rc.constraint_schema = :__vtschemaname and kcu.constraint_name = rc.constraint_name order by ordinal_position asc",
- "SysTableTableSchema": "[:v1, :v2]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-# rails query
-"select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'"
-{
- "QueryType": "SELECT",
- "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
- "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-#rails_query 2
-"SELECT * FROM information_schema.schemata WHERE schema_name = 'user'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.schemata where 1 != 1",
- "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\")]",
- "Table": "information_schema.schemata"
- }
-}
-Gen4 plan same as above
-
-#rails_query 3
-"SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
- "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
- "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
- "Table": "information_schema.`tables`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 4
-"SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
- "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
- "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name",
- "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-#rails_query 5
-"SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where 1 != 1",
- "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = :__vtschemaname",
- "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"constraint_schema\")]",
- "Table": "information_schema.check_constraints, information_schema.table_constraints"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where 1 != 1",
- "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where cc.constraint_schema = :__vtschemaname and tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name",
- "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"constraint_schema\"), VARCHAR(\"table_schema\")]",
- "Table": "information_schema.check_constraints, information_schema.table_constraints"
- }
-}
-
-#rails_query 6
-"SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index"
-{
- "QueryType": "SELECT",
- "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
- "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
- "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.statistics"
- }
-}
-Gen4 plan same as above
-
-#rails_query 7
-"SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
- "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
- "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`columns`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 8
-"SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
- "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
- "Table": "information_schema.`processlist`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 9
-"SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery"
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
- "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`tables`"
- }
-}
-Gen4 plan same as above
-
-#rails_query 10
-"SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
- "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name",
- "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`tables`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
- "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery",
- "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# two predicates specifying the database for the same table work if the database is the same
-"SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1",
- "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"a\"), VARCHAR(\"a\")]",
- "Table": "information_schema.check_constraints"
- }
-}
-Gen4 plan same as above
-
-# system schema in where clause of information_schema query
-"SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
- "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
- "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# subquery of information_schema with itself
-"select * from information_schema.a where id in (select * from information_schema.b)"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a where id in (select * from information_schema.b)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a where id in (select * from information_schema.b)",
- "Table": "information_schema.a"
- }
-}
-Gen4 plan same as above
-
-# query trying to query two different keyspaces at the same time
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# information_schema query using database() func
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# table_schema predicate the wrong way around
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# table_name predicate against a routed table
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
- "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# information_schema query with additional predicates
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and other_column = 42"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and other_column = 42",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and other_column = 42",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# able to isolate table_schema value even when hidden inside of ORs
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (other_column = 42 or TABLE_SCHEMA = 'ks') and (other_column = 42 or foobar = 'value')",
- "SysTableTableSchema": "[VARCHAR(\"ks\")]",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
-
-# expand star with information schema
-"select x.table_name from (select a.* from information_schema.key_column_usage a) x"
-{
- "QueryType": "SELECT",
- "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
- "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
- "Table": "information_schema.key_column_usage"
- }
-}
-Gen4 plan same as above
-
-# expand star with information schema in a derived table
-"select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id"
-{
- "QueryType": "SELECT",
- "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "x_id": 1
- },
- "TableName": "information_schema.key_column_usage_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.table_name, x.id from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
- "Query": "select x.table_name, x.id from (select a.* from information_schema.key_column_usage as a) as x",
- "Table": "information_schema.key_column_usage"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :x_id",
- "Table": "`user`",
- "Values": [
- ":x_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "x_id": 0
- },
- "TableName": "information_schema.key_column_usage_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select x.id, x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
- "Query": "select x.id, x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
- "Table": "information_schema.key_column_usage"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where `user`.id = :x_id",
- "Table": "`user`",
- "Values": [
- ":x_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# join of information_schema queries with select stars exprs
-"select a.*, b.* from information_schema.a a, information_schema.b b"
-{
- "QueryType": "SELECT",
- "Original": "select a.*, b.* from information_schema.a a, information_schema.b b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.*, b.* from information_schema.a as a, information_schema.b as b where 1 != 1",
- "Query": "select a.*, b.* from information_schema.a as a, information_schema.b as b",
- "Table": "information_schema.a, information_schema.b"
- }
-}
-Gen4 plan same as above
-
-# join two routes with SysTableTableName entries in LHS and RHS
-"select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b"
-{
- "QueryType": "SELECT",
- "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
- "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b",
- "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a, (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
- "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select * from information_schema.referential_constraints where table_name = :table_name) as b",
- "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
- "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
- }
-}
-
-"select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
- "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# union as a derived table
-"select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t"
-{
- "QueryType": "SELECT",
- "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
- "SysTableTableSchema": "[VARCHAR(\"music\")]",
- "Table": "information_schema.views"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
- "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge system schema queries as long as they have any same table_schema
-"select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.views"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge system schema queries as long as they have any same table_name
-"select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.views"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
- "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
- "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge union subquery with outer query referencing the same system schemas
-"select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))"
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutExists",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3",
- "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]",
- "Table": "information_schema.`tables`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
- "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1",
- "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]",
- "Table": "information_schema.views"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1",
- "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
- "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))",
- "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]",
- "Table": "information_schema.`tables`"
- }
-}
-
-# merge even one side have schema name in derived table
-"select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt"
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`table` as t where 1 != 1",
- "Query": "select id from information_schema.`table` as t where t.schema_name = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.`table`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`columns` where 1 != 1",
- "Query": "select id from information_schema.`columns`",
- "Table": "information_schema.`columns`"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from (select id from information_schema.`table` as t where 1 != 1 union select id from information_schema.`columns` where 1 != 1) as dt where 1 != 1",
- "Query": "select id from (select id from information_schema.`table` as t where t.schema_name = :__vtschemaname union select id from information_schema.`columns`) as dt",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.`table`"
- }
-}
-
-# merge even one side have schema name in subquery
-"select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)"
-{
- "QueryType": "SELECT",
- "Original": "select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`table` as t where 1 != 1",
- "Query": "select id from information_schema.`table` as t where t.schema_name = :__vtschemaname",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.`table`"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.`columns` where 1 != 1",
- "Query": "select id from information_schema.`columns`",
- "Table": "information_schema.`columns`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.random as t where 1 != 1",
- "Query": "select id from information_schema.random as t where :__sq_has_values1 = 1 and t.col in ::__sq1",
- "Table": "information_schema.random"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select id from information_schema.random as t where 1 != 1",
- "Query": "select id from information_schema.random as t where t.col in (select id from information_schema.`table` as t where t.schema_name = :__vtschemaname union select id from information_schema.`columns`)",
- "SysTableTableSchema": "[VARCHAR(\"a\")]",
- "Table": "information_schema.random"
- }
-}
-
-# systable union query in derived table with constraint on outside (star projection)
-"select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'"
-"symbol constraint_name not found in table or subquery"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from (select * from information_schema.key_column_usage as kcu where 1 != 1 union select * from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1",
- "Query": "select * from (select * from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name and constraint_name = 'primary' union select * from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1 and constraint_name = 'primary') as kcu",
- "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]",
- "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
- "Table": "information_schema.key_column_usage"
- }
-}
-
-# table_schema OR predicate
-# It is unsupported because we do not route queries to multiple keyspaces right now
-"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
- "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
- "Table": "INFORMATION_SCHEMA.`TABLES`"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/systemtables_cases57.json b/go/vt/vtgate/planbuilder/testdata/systemtables_cases57.json
new file mode 100644
index 00000000000..a056ca24fec
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/systemtables_cases57.json
@@ -0,0 +1,1880 @@
+[
+ {
+ "comment": "Single information_schema query",
+ "query": "select TABLE_NAME from information_schema.TABLES",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.TABLES",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`TABLES`",
+ "Table": "information_schema.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.TABLES",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`TABLES`",
+ "Table": "information_schema.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "',' join information_schema",
+ "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1",
+ "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b",
+ "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1",
+ "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b",
+ "Table": "information_schema.`COLUMNS`, information_schema.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.COLUMNS",
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information schema query that uses table_schema",
+ "query": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
+ "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
+ "Table": "information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
+ "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
+ "Table": "information_schema.`columns`"
+ },
+ "TablesUsed": [
+ "information_schema.columns"
+ ]
+ }
+ },
+ {
+ "comment": "information schema join",
+ "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files",
+ "v3-plan": "symbol `tables`.TABLE_SCHEMA not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `tables`.TABLE_SCHEMA, files.`STATUS` from information_schema.`tables`, information_schema.files where 1 != 1",
+ "Query": "select `tables`.TABLE_SCHEMA, files.`STATUS` from information_schema.`tables`, information_schema.files",
+ "Table": "information_schema.`tables`, information_schema.files"
+ },
+ "TablesUsed": [
+ "information_schema.files",
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "access to qualified column names in information_schema",
+ "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1",
+ "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, `PRIVILEGES`, COLUMN_COMMENT, GENERATION_EXPRESSION from information_schema.`COLUMNS` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, `PRIVILEGES`, COLUMN_COMMENT, GENERATION_EXPRESSION from information_schema.`COLUMNS` where `COLUMNS`.COLUMN_NAME = 'toto'",
+ "Table": "information_schema.`COLUMNS`"
+ },
+ "TablesUsed": [
+ "information_schema.COLUMNS"
+ ]
+ }
+ },
+ {
+ "comment": "union of information_schema",
+ "query": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`",
+ "Table": "information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`",
+ "Table": "information_schema.`columns`"
+ },
+ "TablesUsed": [
+ "information_schema.columns",
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "union between information_schema tables that should not be merged",
+ "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:21)",
+ "(1:22)",
+ "(2:23)",
+ "(3:24)",
+ "(4:25)",
+ "5: binary",
+ "(6:26)",
+ "7: binary",
+ "8: binary",
+ "9: binary",
+ "10: binary",
+ "11: binary",
+ "12: binary",
+ "13: binary",
+ "(14:27)",
+ "(15:28)",
+ "(16:29)",
+ "(17:30)",
+ "18: binary",
+ "(19:31)",
+ "(20:32)"
+ ],
+ "ResultColumns": 21,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "Select from information schema query with two tables that route should be merged",
+ "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1",
+ "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ },
+ "TablesUsed": [
+ "information_schema.KEY_COLUMN_USAGE",
+ "information_schema.REFERENTIAL_CONSTRAINTS"
+ ]
+ }
+ },
+ {
+ "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.",
+ "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1",
+ "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME",
+ "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where 1 != 1",
+ "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS, INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.KEY_COLUMN_USAGE",
+ "information_schema.REFERENTIAL_CONSTRAINTS",
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema.routines",
+ "query": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
+ "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.routines"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
+ "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.routines"
+ },
+ "TablesUsed": [
+ "information_schema.routines"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema table sizes",
+ "query": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
+ "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
+ "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema referential constraints",
+ "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null and rc.constraint_schema = :__vtschemaname and kcu.constraint_name = rc.constraint_name order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails query",
+ "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 2",
+ "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.schemata where 1 != 1",
+ "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where 1 != 1",
+ "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ },
+ "TablesUsed": [
+ "information_schema.schemata"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 3",
+ "query": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
+ "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
+ "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 4",
+ "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 6",
+ "query": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
+ "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.statistics"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
+ "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.statistics"
+ },
+ "TablesUsed": [
+ "information_schema.statistics"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 7",
+ "query": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
+ "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
+ "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`columns`"
+ },
+ "TablesUsed": [
+ "information_schema.columns"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 8",
+ "query": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
+ "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
+ "Table": "information_schema.`processlist`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
+ "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
+ "Table": "information_schema.`processlist`"
+ },
+ "TablesUsed": [
+ "information_schema.processlist"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 9",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 10",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name",
+ "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "system schema in where clause of information_schema query",
+ "query": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
+ "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
+ "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "subquery of information_schema with itself",
+ "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)",
+ "v3-plan": "symbol `TABLES`.`CHECKSUM` not found",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `TABLES`.`CHECKSUM` from information_schema.`TABLES` where 1 != 1",
+ "Query": "select `TABLES`.`CHECKSUM` from information_schema.`TABLES` where TABLE_NAME in (select TABLE_NAME from information_schema.`COLUMNS`)",
+ "Table": "information_schema.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.COLUMNS",
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "query trying to query two different keyspaces at the same time",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema query using database() func",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "table_schema predicate the wrong way around",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "table_name predicate against a routed table",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema query with additional predicates",
+ "query": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and DATA_FREE = 42",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT `TABLE_NAME` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and DATA_FREE = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and DATA_FREE = 42",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "able to isolate table_schema value even when hidden inside of ORs",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or TABLE_SCHEMA = 'ks') and (DATA_FREE = 42 or `CHECKSUM` = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or TABLE_SCHEMA = 'ks') and (DATA_FREE = 42 or `CHECKSUM` = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "expand star with information schema",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage"
+ ]
+ }
+ },
+ {
+ "comment": "expand star with information schema in a derived table",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "x_COLUMN_NAME": 1
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME",
+ "Table": "`user`",
+ "Values": [
+ ":x_COLUMN_NAME"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "x_COLUMN_NAME": 0
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.COLUMN_NAME, x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.COLUMN_NAME, x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME",
+ "Table": "`user`",
+ "Values": [
+ ":x_COLUMN_NAME"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "join of information_schema queries with select stars exprs",
+ "query": "select a.*, b.* from information_schema.a a, information_schema.b b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.a a, information_schema.b b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.*, b.* from information_schema.a as a, information_schema.b as b where 1 != 1",
+ "Query": "select a.*, b.* from information_schema.a as a, information_schema.b as b",
+ "Table": "information_schema.a, information_schema.b"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.a a, information_schema.b b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.*, b.* from information_schema.a as a, information_schema.b as b where 1 != 1",
+ "Query": "select a.*, b.* from information_schema.a as a, information_schema.b as b",
+ "Table": "information_schema.a, information_schema.b"
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "information_schema.b"
+ ]
+ }
+ },
+ {
+ "comment": "join two routes with SysTableTableName entries in LHS and RHS",
+ "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "union as a derived table",
+ "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_schema",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_name",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge union subquery with outer query referencing the same system schemas",
+ "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3",
+ "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1",
+ "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge even one side have schema name in derived table",
+ "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1",
+ "Query": "select TABLE_NAME from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1",
+ "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select TABLE_NAME from information_schema.`columns`) as dt",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.columns",
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "merge even one side have schema name in subquery",
+ "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1",
+ "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1",
+ "Query": "select COLUMN_NAME from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1",
+ "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values1 = 1 and COLUMN_NAME in ::__sq1",
+ "Table": "information_schema.`COLUMNS`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1",
+ "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select COLUMN_NAME from information_schema.`columns`)",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`COLUMNS`"
+ },
+ "TablesUsed": [
+ "information_schema.COLUMNS",
+ "information_schema.columns",
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/systemtables_cases80.json b/go/vt/vtgate/planbuilder/testdata/systemtables_cases80.json
new file mode 100644
index 00000000000..41b30d32151
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/systemtables_cases80.json
@@ -0,0 +1,1870 @@
+[
+ {
+ "comment": "Single information_schema query",
+ "query": "select col from information_schema.foo",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from information_schema.foo",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from information_schema.foo where 1 != 1",
+ "Query": "select col from information_schema.foo",
+ "Table": "information_schema.foo"
+ }
+ },
+ "gen4-plan": "symbol col not found"
+ },
+ {
+ "comment": "',' join information_schema",
+ "query": "select a.id,b.id from information_schema.a as a, information_schema.b as b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.id,b.id from information_schema.a as a, information_schema.b as b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.id, b.id from information_schema.a as a, information_schema.b as b where 1 != 1",
+ "Query": "select a.id, b.id from information_schema.a as a, information_schema.b as b",
+ "Table": "information_schema.a, information_schema.b"
+ }
+ },
+ "gen4-plan": "symbol b.id not found"
+ },
+ {
+ "comment": "information schema query that uses table_schema",
+ "query": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
+ "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
+ "Table": "information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select column_name from information_schema.columns where table_schema = (select schema())",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.`columns` where 1 != 1",
+ "Query": "select column_name from information_schema.`columns` where table_schema = schema()",
+ "Table": "information_schema.`columns`"
+ },
+ "TablesUsed": [
+ "information_schema.columns"
+ ]
+ }
+ },
+ {
+ "comment": "information schema join",
+ "query": "select * from information_schema.a join information_schema.b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a join information_schema.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a join information_schema.b where 1 != 1",
+ "Query": "select * from information_schema.a join information_schema.b",
+ "Table": "information_schema.a, information_schema.b"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a join information_schema.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a, information_schema.b where 1 != 1",
+ "Query": "select * from information_schema.a, information_schema.b",
+ "Table": "information_schema.a, information_schema.b"
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "information_schema.b"
+ ]
+ }
+ },
+ {
+ "comment": "access to unqualified column names in information_schema",
+ "query": "select * from information_schema.a where b=10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a where b=10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select * from information_schema.a where b = 10",
+ "Table": "information_schema.a"
+ }
+ },
+ "gen4-plan": "symbol b not found"
+ },
+ {
+ "comment": "access to qualified column names in information_schema",
+ "query": "select * from information_schema.a where information_schema.a.b=10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a where information_schema.a.b=10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select * from information_schema.a where information_schema.a.b = 10",
+ "Table": "information_schema.a"
+ }
+ },
+ "gen4-plan": "symbol information_schema.a.b not found"
+ },
+ {
+ "comment": "union of information_schema",
+ "query": "select * from information_schema.a union select * from information_schema.b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a union select * from information_schema.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1 union select * from information_schema.b where 1 != 1",
+ "Query": "select * from information_schema.a union select * from information_schema.b",
+ "Table": "information_schema.a"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a union select * from information_schema.b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1 union select * from information_schema.b where 1 != 1",
+ "Query": "select * from information_schema.a union select * from information_schema.b",
+ "Table": "information_schema.a"
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "information_schema.b"
+ ]
+ }
+ },
+ {
+ "comment": "union between information_schema tables that should not be merged",
+ "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.`tables` where 1 != 1",
+ "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:21)",
+ "(1:22)",
+ "(2:23)",
+ "(3:24)",
+ "(4:25)",
+ "5: binary",
+ "(6:26)",
+ "7: binary",
+ "8: binary",
+ "9: binary",
+ "10: binary",
+ "11: binary",
+ "12: binary",
+ "13: binary",
+ "(14:27)",
+ "(15:28)",
+ "(16:29)",
+ "(17:30)",
+ "18: binary",
+ "(19:31)",
+ "(20:32)"
+ ],
+ "ResultColumns": 21,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1",
+ "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"main\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "Select from information schema query with two tables that route should be merged",
+ "query": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DELETE_RULE, UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1",
+ "Query": "select DELETE_RULE, UPDATE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = 'test' and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ },
+ "TablesUsed": [
+ "information_schema.KEY_COLUMN_USAGE",
+ "information_schema.REFERENTIAL_CONSTRAINTS"
+ ]
+ }
+ },
+ {
+ "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.",
+ "query": "SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT KCU.DELETE_RULE, S.UPDATE_RULE FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.K AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.K",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1",
+ "Query": "select KCU.DELETE_RULE from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc",
+ "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where 1 != 1",
+ "Query": "select S.UPDATE_RULE from INFORMATION_SCHEMA.K as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME",
+ "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]",
+ "SysTableTableSchema": "[VARCHAR(\"test\")]",
+ "Table": "INFORMATION_SCHEMA.K"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "symbol S.TABLE_NAME not found"
+ },
+ {
+ "comment": "information_schema.routines",
+ "query": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
+ "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.routines"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT routine_name AS name, routine_definition AS definition FROM information_schema.routines WHERE ROUTINE_SCHEMA = ? AND ROUTINE_TYPE = 'PROCEDURE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1",
+ "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.routines"
+ },
+ "TablesUsed": [
+ "information_schema.routines"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema table sizes",
+ "query": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
+ "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(data_length + index_length) as size FROM information_schema.TABLES WHERE table_schema = ?",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1",
+ "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[:v1]",
+ "Table": "information_schema.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema referential constraints",
+ "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where 1 != 1",
+ "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu, information_schema.referential_constraints as rc where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null and rc.constraint_schema = :__vtschemaname and kcu.constraint_name = rc.constraint_name order by ordinal_position asc",
+ "SysTableTableSchema": "[:v1, :v2]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails query",
+ "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 2",
+ "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.schemata where 1 != 1",
+ "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where 1 != 1",
+ "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\")]",
+ "Table": "information_schema.schemata"
+ },
+ "TablesUsed": [
+ "information_schema.schemata"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 3",
+ "query": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
+ "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_comment FROM information_schema.tables WHERE table_schema = 'schema_name' AND table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1",
+ "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"schema_name\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 4",
+ "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.referential_constraints, information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1",
+ "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name",
+ "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 5",
+ "query": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where 1 != 1",
+ "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = :__vtschemaname",
+ "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"constraint_schema\")]",
+ "Table": "information_schema.check_constraints, information_schema.table_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where 1 != 1",
+ "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc, information_schema.table_constraints as tc where cc.constraint_schema = :__vtschemaname and tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = tc.constraint_schema and cc.constraint_name = tc.constraint_name",
+ "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"constraint_schema\"), VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.check_constraints, information_schema.table_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.check_constraints",
+ "information_schema.table_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 6",
+ "query": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
+ "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.statistics"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT column_name FROM information_schema.statistics WHERE index_name = 'PRIMARY' AND table_schema = 'table_schema' AND table_name = 'table_name' ORDER BY seq_in_index",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select column_name from information_schema.statistics where 1 != 1",
+ "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.statistics"
+ },
+ "TablesUsed": [
+ "information_schema.statistics"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 7",
+ "query": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
+ "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`columns`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT generation_expression FROM information_schema.columns WHERE table_schema = 'table_schema' AND table_name = 'table_name' AND column_name = 'column_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1",
+ "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'",
+ "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`columns`"
+ },
+ "TablesUsed": [
+ "information_schema.columns"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 8",
+ "query": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
+ "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
+ "Table": "information_schema.`processlist`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT id FROM information_schema.processlist WHERE info LIKE '% FOR UPDATE'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`processlist` where 1 != 1",
+ "Query": "select id from information_schema.`processlist` where info like '% FOR UPDATE'",
+ "Table": "information_schema.`processlist`"
+ },
+ "TablesUsed": [
+ "information_schema.processlist"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 9",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "rails_query 10",
+ "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name",
+ "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1",
+ "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery",
+ "SysTableTableSchema": "[VARCHAR(\"table_schema\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables"
+ ]
+ }
+ },
+ {
+ "comment": "two predicates specifying the database for the same table work if the database is the same",
+ "query": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT cc.constraint_name AS 'name' FROM information_schema.check_constraints cc WHERE cc.constraint_schema = 'a' AND cc.table_schema = 'a'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1",
+ "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\"), VARCHAR(\"a\")]",
+ "Table": "information_schema.check_constraints"
+ }
+ },
+ "gen4-plan": "symbol cc.table_schema not found"
+ },
+ {
+ "comment": "system schema in where clause of information_schema query",
+ "query": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
+ "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'performance_schema' AND table_name = 'foo'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name",
+ "SysTableTableName": "[table_name:VARCHAR(\"foo\")]",
+ "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "subquery of information_schema with itself",
+ "query": "select * from information_schema.a where id in (select * from information_schema.b)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a where id in (select * from information_schema.b)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select * from information_schema.a where id in (select * from information_schema.b)",
+ "Table": "information_schema.a"
+ }
+ },
+ "gen4-plan": "symbol id not found"
+ },
+ {
+ "comment": "query trying to query two different keyspaces at the same time",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema query using database() func",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "table_schema predicate the wrong way around",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "table_name predicate against a routed table",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME",
+ "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "information_schema query with additional predicates",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and other_column = 42",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' and other_column = 42",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and other_column = 42",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": "symbol other_column not found"
+ },
+ {
+ "comment": "able to isolate table_schema value even when hidden inside of ORs",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and other_column = 42) OR (TABLE_SCHEMA = 'ks' and foobar = 'value')",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (other_column = 42 or TABLE_SCHEMA = 'ks') and (other_column = 42 or foobar = 'value')",
+ "SysTableTableSchema": "[VARCHAR(\"ks\")]",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": "symbol foobar not found"
+ },
+ {
+ "comment": "expand star with information schema",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage"
+ ]
+ }
+ },
+ {
+ "comment": "expand star with information schema in a derived table",
+ "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.id = user.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "x_id": 1
+ },
+ "TableName": "information_schema.key_column_usage_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select x.table_name, x.id from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1",
+ "Query": "select x.table_name, x.id from (select a.* from information_schema.key_column_usage as a) as x",
+ "Table": "information_schema.key_column_usage"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :x_id",
+ "Table": "`user`",
+ "Values": [
+ ":x_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "symbol x.id not found"
+ },
+ {
+ "comment": "join of information_schema queries with select stars exprs",
+ "query": "select a.*, b.* from information_schema.a a, information_schema.b b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.a a, information_schema.b b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.*, b.* from information_schema.a as a, information_schema.b as b where 1 != 1",
+ "Query": "select a.*, b.* from information_schema.a as a, information_schema.b as b",
+ "Table": "information_schema.a, information_schema.b"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.*, b.* from information_schema.a a, information_schema.b b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.*, b.* from information_schema.a as a, information_schema.b as b where 1 != 1",
+ "Query": "select a.*, b.* from information_schema.a as a, information_schema.b as b",
+ "Table": "information_schema.a, information_schema.b"
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "information_schema.b"
+ ]
+ }
+ },
+ {
+ "comment": "join two routes with SysTableTableName entries in LHS and RHS",
+ "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where 1 != 1) as b where 1 != 1",
+ "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name) as b",
+ "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]",
+ "Table": "information_schema.key_column_usage, information_schema.referential_constraints"
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage",
+ "information_schema.referential_constraints"
+ ]
+ }
+ },
+ {
+ "comment": "sum aggregation over a union of two information_schema queries merges into a single route",
+ "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "union as a derived table",
+ "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1",
+ "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_schema",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge system schema queries as long as they have any same table_schema (duplicate coverage of the previous case; the predicates filter on table_schema, not table_name)",
+ "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)",
+ "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)",
+ "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge union subquery with outer query referencing the same system schemas",
+ "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutExists",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3",
+ "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.views where 1 != 1",
+ "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1",
+ "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]",
+ "Table": "information_schema.views"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1",
+ "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))",
+ "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]",
+ "Table": "information_schema.`tables`"
+ },
+ "TablesUsed": [
+ "information_schema.tables",
+ "information_schema.views"
+ ]
+ }
+ },
+ {
+ "comment": "merge even if only one side has a schema name in the derived table",
+ "query": "select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns) dt",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`table` as t where 1 != 1",
+ "Query": "select id from information_schema.`table` as t where t.schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`table`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`columns` where 1 != 1",
+ "Query": "select id from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": "symbol id not found"
+ },
+ {
+ "comment": "merge even if only one side has a schema name in the subquery",
+ "query": "select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from information_schema.random t where t.col in (select id from information_schema.table t where t.schema_name = 'a' union select id from information_schema.columns)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`table` as t where 1 != 1",
+ "Query": "select id from information_schema.`table` as t where t.schema_name = :__vtschemaname",
+ "SysTableTableSchema": "[VARCHAR(\"a\")]",
+ "Table": "information_schema.`table`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.`columns` where 1 != 1",
+ "Query": "select id from information_schema.`columns`",
+ "Table": "information_schema.`columns`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id from information_schema.random as t where 1 != 1",
+ "Query": "select id from information_schema.random as t where :__sq_has_values1 = 1 and t.col in ::__sq1",
+ "Table": "information_schema.random"
+ }
+ ]
+ }
+ },
+ "gen4-plan": "symbol id not found"
+ },
+ {
+ "comment": "table_schema OR predicate. It is unsupported because we do not route queries to multiple keyspaces right now",
+ "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1",
+ "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'",
+ "Table": "INFORMATION_SCHEMA.`TABLES`"
+ },
+ "TablesUsed": [
+ "information_schema.TABLES"
+ ]
+ }
+ },
+ {
+ "comment": "Query from TypeORM",
+ "query": "SELECT * FROM ( SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'fuelings' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'order_payments' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'pools' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'orders' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'order_hops' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'markets' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'market_outlets' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'prefuel' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'billing_statements' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'perp_markets' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'perp_market_outlets' ) `kcu` WHERE `CONSTRAINT_NAME` = 'PRIMARY'",
+ "v3-plan": "symbol CONSTRAINT_NAME not found in table or subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT * FROM ( SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'fuelings' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'order_payments' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'pools' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'orders' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'order_hops' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'markets' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'market_outlets' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'prefuel' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'billing_statements' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'perp_markets' UNION SELECT * FROM `INFORMATION_SCHEMA`.`KEY_COLUMN_USAGE` `kcu` WHERE `kcu`.`TABLE_SCHEMA` = 'rio' AND `kcu`.`TABLE_NAME` = 'perp_market_outlets' ) `kcu` WHERE `CONSTRAINT_NAME` = 'PRIMARY'",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "kcu.CONSTRAINT_NAME = 'PRIMARY'",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, 
kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1 union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where 1 != 1",
+ "Query": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME1 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME2 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME3 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from 
INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME4 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME5 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME6 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME7 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME8 union all select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, 
kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME9 union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as kcu where kcu.TABLE_SCHEMA = :__vtschemaname and kcu.TABLE_NAME = :kcu_TABLE_NAME10",
+ "SysTableTableName": "[kcu_TABLE_NAME10:VARCHAR(\"perp_market_outlets\"), kcu_TABLE_NAME1:VARCHAR(\"order_payments\"), kcu_TABLE_NAME2:VARCHAR(\"pools\"), kcu_TABLE_NAME3:VARCHAR(\"orders\"), kcu_TABLE_NAME4:VARCHAR(\"order_hops\"), kcu_TABLE_NAME5:VARCHAR(\"markets\"), kcu_TABLE_NAME6:VARCHAR(\"market_outlets\"), kcu_TABLE_NAME7:VARCHAR(\"prefuel\"), kcu_TABLE_NAME8:VARCHAR(\"billing_statements\"), kcu_TABLE_NAME9:VARCHAR(\"perp_markets\"), kcu_TABLE_NAME:VARCHAR(\"fuelings\")]",
+ "SysTableTableSchema": "[VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\"), VARCHAR(\"rio\")]",
+ "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.KEY_COLUMN_USAGE"
+ ]
+ }
+ },
+ {
+ "comment": "systable union query in derived table with constraint on outside (without star projection)",
+ "query": "select CONSTRAINT_NAME from (select CONSTRAINT_NAME from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select CONSTRAINT_NAME from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `CONSTRAINT_NAME` = 'primary'",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select CONSTRAINT_NAME from (select CONSTRAINT_NAME from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select CONSTRAINT_NAME from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `CONSTRAINT_NAME` = 'primary'",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Filter",
+ "Predicate": "CONSTRAINT_NAME = 'primary'",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select CONSTRAINT_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select CONSTRAINT_NAME from information_schema.key_column_usage as kcu where 1 != 1",
+ "Query": "select CONSTRAINT_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name union select CONSTRAINT_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1",
+ "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]",
+ "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]",
+ "Table": "information_schema.key_column_usage"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.key_column_usage"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json
new file mode 100644
index 00000000000..ed28ddf599b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json
@@ -0,0 +1,1823 @@
+[
+ {
+ "comment": "TPC-C select join customer1 and warehouse1",
+ "query": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where 1 != 1",
+ "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where w_id = 1 and c_d_id = 15 and c_id = 10",
+ "Table": "customer1, warehouse1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where 1 != 1",
+ "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where c_d_id = 15 and c_id = 10 and w_id = 1 and c_w_id = w_id",
+ "Table": "customer1, warehouse1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1",
+ "main.warehouse1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select district1 for update",
+ "query": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
+ "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
+ "Table": "district1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
+ "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
+ "Table": "district1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update district1 unique",
+ "query": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
+ "Table": "district1",
+ "Values": [
+ "INT64(8546)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
+ "Table": "district1",
+ "Values": [
+ "INT64(8546)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into orders1",
+ "query": "INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into orders1(o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) values (334983, 59896, :_o_w_id_0, 156, now(), 781038, 'hello')",
+ "TableName": "orders1",
+ "VindexValues": {
+ "hash": "INT64(99)"
+ }
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into new_orders1",
+ "query": "INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into new_orders1(no_o_id, no_d_id, no_w_id) values (8, 9, :_no_w_id_0)",
+ "TableName": "new_orders1",
+ "VindexValues": {
+ "hash": "INT64(48)"
+ }
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique item1",
+ "query": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
+ "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
+ "Table": "item1",
+ "Values": [
+ "INT64(9654)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
+ "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
+ "Table": "item1",
+ "Values": [
+ "INT64(9654)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.item1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select stock1 for update",
+ "query": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
+ "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
+ "Table": "stock1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
+ "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
+ "Table": "stock1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update stock1",
+ "query": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
+ "Table": "stock1",
+ "Values": [
+ "INT64(6)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
+ "Table": "stock1",
+ "Values": [
+ "INT64(6)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into order_line1",
+ "query": "INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into order_line1(ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) values (648, 36812, :_ol_w_id_0, 4946378, 3, 7, 89, 1, 'info')",
+ "TableName": "order_line1",
+ "VindexValues": {
+ "hash": "INT64(3201)"
+ }
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update warehouse1 unique",
+ "query": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.warehouse1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(3)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.warehouse1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select warehouse1 unique",
+ "query": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
+ "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(998)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
+ "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
+ "Table": "warehouse1",
+ "Values": [
+ "INT64(998)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.warehouse1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update district1 ytd unique",
+ "query": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(89)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select district1 unique",
+ "query": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
+ "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(896)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
+ "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
+ "Table": "district1",
+ "Values": [
+ "INT64(896)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select aggr from customer1",
+ "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select customer1 order by",
+ "query": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_id from customer1 where 1 != 1",
+ "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_id from customer1 where 1 != 1",
+ "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select for update customer1 unique",
+ "query": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
+ "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8965)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
+ "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
+ "Table": "customer1",
+ "Values": [
+ "INT64(8965)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select customer1 unique",
+ "query": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_data from customer1 where 1 != 1",
+ "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
+ "Table": "customer1",
+ "Values": [
+ "INT64(32)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_data from customer1 where 1 != 1",
+ "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
+ "Table": "customer1",
+ "Values": [
+ "INT64(32)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update customer1 unique and float value",
+ "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update customer1 unique and float value without c_data",
+ "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
+ "Table": "customer1",
+ "Values": [
+ "INT64(20)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C insert into history1",
+ "query": "INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')",
+ "plan": {
+ "QueryType": "INSERT",
+ "Original": "INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')",
+ "Instructions": {
+ "OperatorType": "Insert",
+ "Variant": "Sharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "insert into history1(h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) values (6809887, 38748, 8746, 210, :_h_w_id_0, now(), 8907, 'data')",
+ "TableName": "history1",
+ "VindexValues": {
+ "hash": "INT64(8)"
+ }
+ },
+ "TablesUsed": [
+ "main.history1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select aggr customer1",
+ "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(870)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
+ "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
+ "Table": "customer1",
+ "Values": [
+ "INT64(870)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select order by customer1",
+ "query": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(840)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
+ "Table": "customer1",
+ "Values": [
+ "INT64(840)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique customer1",
+ "query": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
+ "Table": "customer1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
+ "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
+ "Table": "customer1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select order by orders1",
+ "query": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
+ "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
+ "Table": "orders1",
+ "Values": [
+ "INT64(9894)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
+ "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
+ "Table": "orders1",
+ "Values": [
+ "INT64(9894)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select order_line1",
+ "query": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
+ "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(92)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
+ "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(92)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select for update new_orders1",
+ "query": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
+ "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
+ "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(15)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete new_orders1",
+ "query": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(98465)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
+ "Table": "new_orders1",
+ "Values": [
+ "INT64(98465)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.new_orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique orders1",
+ "query": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_c_id from orders1 where 1 != 1",
+ "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
+ "Table": "orders1",
+ "Values": [
+ "INT64(894605)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_c_id from orders1 where 1 != 1",
+ "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
+ "Table": "orders1",
+ "Values": [
+ "INT64(894605)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update orders1 unique",
+ "query": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
+ "Table": "orders1",
+ "Values": [
+ "INT64(897)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
+ "Table": "orders1",
+ "Values": [
+ "INT64(897)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update order_line1",
+ "query": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(8)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select sum order_line1",
+ "query": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
+ "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(87)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
+ "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(87)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C update customer1",
+ "query": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
+ "v3-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
+ "Table": "customer1",
+ "Values": [
+ "INT64(160)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "UPDATE",
+ "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
+ "Instructions": {
+ "OperatorType": "Update",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
+ "Table": "customer1",
+ "Values": [
+ "INT64(160)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.customer1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select unique district1",
+ "query": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
+ "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
+ "Table": "district1",
+ "Values": [
+ "INT64(21)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
+ "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
+ "Table": "district1",
+ "Values": [
+ "INT64(21)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.district1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select count distinct stock1 join order_line1",
+ "query": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where 1 != 1",
+ "Query": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id < 30 and ol.ol_o_id >= 15 and s.s_w_id = 12 and s.s_quantity < 10",
+ "Table": "stock1, order_line1",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where 1 != 1",
+ "Query": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where s.s_w_id = 12 and s.s_quantity < 10 and ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id < 30 and ol.ol_o_id >= 15 and ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id",
+ "Table": "order_line1, stock1",
+ "Values": [
+ "INT64(12)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1",
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select distinct order_line1",
+ "query": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
+ "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id < 500 and ol_o_id >= 56",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
+ "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id < 500 and ol_o_id >= 56",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C",
+ "query": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from stock1 where 1 != 1",
+ "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity < 1000",
+ "Table": "stock1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) from stock1 where 1 != 1",
+ "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity < 1000",
+ "Table": "stock1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.stock1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C select with subquery,aggr,distinct,having,limit",
+ "query": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "o_o_c_id": 3,
+ "o_o_d_id": 1,
+ "o_o_w_id": 2
+ },
+ "TableName": "orders1_orders1",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o where 1 != 1",
+ "Query": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o",
+ "Table": "orders1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
+ "Query": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = :o_o_w_id and t.o_d_id = :o_o_d_id and t.o_c_id = :o_o_c_id",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
+ "Query": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete order_line1",
+ "query": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(178)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
+ "Table": "order_line1",
+ "Values": [
+ "INT64(178)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.order_line1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete orders1",
+ "query": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
+ "Table": "orders1",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.orders1"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-C delete history1",
+ "query": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
+ "v3-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
+ "Table": "history1",
+ "Values": [
+ "INT64(75)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.history1"
+ ]
+ },
+ "gen4-plan": {
+ "QueryType": "DELETE",
+ "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
+ "Instructions": {
+ "OperatorType": "Delete",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "TargetTabletType": "PRIMARY",
+ "MultiShardAutocommit": false,
+ "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
+ "Table": "history1",
+ "Values": [
+ "INT64(75)"
+ ],
+ "Vindex": "hash"
+ },
+ "TablesUsed": [
+ "main.history1"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.txt b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.txt
deleted file mode 100644
index ced6e2f5425..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.txt
+++ /dev/null
@@ -1,1784 +0,0 @@
-# TPC-C select join customer1 and warehouse1
-"SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where 1 != 1",
- "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where w_id = 1 and c_d_id = 15 and c_id = 10",
- "Table": "customer1, warehouse1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where 1 != 1",
- "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c, warehouse1 as w where c_d_id = 15 and c_id = 10 and w_id = 1 and c_w_id = w_id",
- "Table": "customer1, warehouse1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1",
- "main.warehouse1"
- ]
-}
-
-# TPC-C select district1 for update
-"SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
- "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
- "Table": "district1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1",
- "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update",
- "Table": "district1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C update district1 unique
-"UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
- "Table": "district1",
- "Values": [
- "INT64(8546)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546",
- "Table": "district1",
- "Values": [
- "INT64(8546)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C insert into orders1
-"INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO orders1 (o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) VALUES (334983,59896,99,156,NOW(),781038,'hello')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into orders1(o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) values (334983, 59896, :_o_w_id_0, 156, now(), 781038, 'hello')",
- "TableName": "orders1",
- "VindexValues": {
- "hash": "INT64(99)"
- }
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C insert into new_orders1
-"INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO new_orders1 (no_o_id, no_d_id, no_w_id) VALUES (8,9,48)",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into new_orders1(no_o_id, no_d_id, no_w_id) values (8, 9, :_no_w_id_0)",
- "TableName": "new_orders1",
- "VindexValues": {
- "hash": "INT64(48)"
- }
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C select unique item1
-"SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654"
-{
- "QueryType": "SELECT",
- "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
- "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
- "Table": "item1",
- "Values": [
- "INT64(9654)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1",
- "Query": "select i_price, i_name, i_data from item1 where i_id = 9654",
- "Table": "item1",
- "Values": [
- "INT64(9654)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.item1"
- ]
-}
-
-# TPC-C select stock1 for update
-"SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
- "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
- "Table": "stock1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1",
- "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update",
- "Table": "stock1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-
-# TPC-C update stock1
-"UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
- "Table": "stock1",
- "Values": [
- "INT64(6)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6",
- "Table": "stock1",
- "Values": [
- "INT64(6)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-
-# TPC-C insert into order_line1
-"INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO order_line1 (ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) VALUES (648,36812,3201,4946378,3,7,89,1,'info')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into order_line1(ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) values (648, 36812, :_ol_w_id_0, 4946378, 3, 7, 89, 1, 'info')",
- "TableName": "order_line1",
- "VindexValues": {
- "hash": "INT64(3201)"
- }
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C update warehouse1 unique
-"UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
- "Table": "warehouse1",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.warehouse1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3",
- "Table": "warehouse1",
- "Values": [
- "INT64(3)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.warehouse1"
- ]
-}
-
-# TPC-C select warehouse1 unique
-"SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998"
-{
- "QueryType": "SELECT",
- "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
- "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
- "Table": "warehouse1",
- "Values": [
- "INT64(998)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1",
- "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998",
- "Table": "warehouse1",
- "Values": [
- "INT64(998)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.warehouse1"
- ]
-}
-
-# TPC-C update district1 unique
-"UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(89)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C select district1 unique
-"SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9"
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
- "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(896)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1",
- "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9",
- "Table": "district1",
- "Values": [
- "INT64(896)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C select aggr from customer1
-"SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select customer1 order by
-"SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_id from customer1 where 1 != 1",
- "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_id from customer1 where 1 != 1",
- "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select for update customer1 unique
-"SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
- "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
- "Table": "customer1",
- "Values": [
- "INT64(8965)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1",
- "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update",
- "Table": "customer1",
- "Values": [
- "INT64(8965)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select customer1 unique
-"SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_data from customer1 where 1 != 1",
- "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
- "Table": "customer1",
- "Values": [
- "INT64(32)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_data from customer1 where 1 != 1",
- "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5",
- "Table": "customer1",
- "Values": [
- "INT64(32)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C update customer1 unique and float value
-"UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C update customer1 unique and float value
-"UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98",
- "Table": "customer1",
- "Values": [
- "INT64(20)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C insert into history1
-"INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')"
-{
- "QueryType": "INSERT",
- "Original": "INSERT INTO history1 (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) VALUES (6809887,38748,8746,210,8,NOW(),8907,'data')",
- "Instructions": {
- "OperatorType": "Insert",
- "Variant": "Sharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "insert into history1(h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) values (6809887, 38748, 8746, 210, :_h_w_id_0, now(), 8907, 'data')",
- "TableName": "history1",
- "VindexValues": {
- "hash": "INT64(8)"
- }
- },
- "TablesUsed": [
- "main.history1"
- ]
-}
-Gen4 plan same as above
-
-# TPC-C select aggr customer1
-"SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'"
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(870)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1",
- "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'",
- "Table": "customer1",
- "Values": [
- "INT64(870)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select order by customer1
-"SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(840)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc",
- "Table": "customer1",
- "Values": [
- "INT64(840)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select unique customer1
-"SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1"
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
- "Table": "customer1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1",
- "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1",
- "Table": "customer1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select order by orders1
-"SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC"
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
- "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
- "Table": "orders1",
- "Values": [
- "INT64(9894)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1",
- "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc",
- "Table": "orders1",
- "Values": [
- "INT64(9894)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C select order_line1
-"SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1"
-{
- "QueryType": "SELECT",
- "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
- "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
- "Table": "order_line1",
- "Values": [
- "INT64(92)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1",
- "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1",
- "Table": "order_line1",
- "Values": [
- "INT64(92)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C select for update new_orders1
-"SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE"
-{
- "QueryType": "SELECT",
- "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
- "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
- "Table": "new_orders1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select no_o_id from new_orders1 where 1 != 1",
- "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update",
- "Table": "new_orders1",
- "Values": [
- "INT64(15)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-
-# TPC-C delete new_orders1
-"DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
- "Table": "new_orders1",
- "Values": [
- "INT64(98465)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465",
- "Table": "new_orders1",
- "Values": [
- "INT64(98465)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.new_orders1"
- ]
-}
-
-# TPC-C select unique orders1
-"SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605"
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_c_id from orders1 where 1 != 1",
- "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
- "Table": "orders1",
- "Values": [
- "INT64(894605)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_c_id from orders1 where 1 != 1",
- "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605",
- "Table": "orders1",
- "Values": [
- "INT64(894605)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C update orders1 unique
-"UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
- "Table": "orders1",
- "Values": [
- "INT64(897)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897",
- "Table": "orders1",
- "Values": [
- "INT64(897)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C update order_line1
-"UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
- "Table": "order_line1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8",
- "Table": "order_line1",
- "Values": [
- "INT64(8)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C select sum order_line1
-"SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87"
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
- "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
- "Table": "order_line1",
- "Values": [
- "INT64(87)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1",
- "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87",
- "Table": "order_line1",
- "Values": [
- "INT64(87)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C update customer1
-"UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160"
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
- "Table": "customer1",
- "Values": [
- "INT64(160)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-{
- "QueryType": "UPDATE",
- "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160",
- "Instructions": {
- "OperatorType": "Update",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160",
- "Table": "customer1",
- "Values": [
- "INT64(160)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.customer1"
- ]
-}
-
-# TPC-C select unique district1
-"SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21"
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
- "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
- "Table": "district1",
- "Values": [
- "INT64(21)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select d_next_o_id from district1 where 1 != 1",
- "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21",
- "Table": "district1",
- "Values": [
- "INT64(21)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.district1"
- ]
-}
-
-# TPC-C select count distinct stock1 join order_line1
-"SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id \u003c 30 AND ol.ol_o_id \u003e= 15 AND s.s_w_id= 12 AND s.s_quantity \u003c 10"
-{
- "QueryType": "SELECT",
- "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id \u003c 30 AND ol.ol_o_id \u003e= 15 AND s.s_w_id= 12 AND s.s_quantity \u003c 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where 1 != 1",
- "Query": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id \u003c 30 and ol.ol_o_id \u003e= 15 and s.s_w_id = 12 and s.s_quantity \u003c 10",
- "Table": "stock1, order_line1",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id \u003c 30 AND ol.ol_o_id \u003e= 15 AND s.s_w_id= 12 AND s.s_quantity \u003c 10",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where 1 != 1",
- "Query": "select count(distinct s.s_i_id) from stock1 as s, order_line1 as ol where s.s_w_id = 12 and s.s_quantity \u003c 10 and ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id \u003c 30 and ol.ol_o_id \u003e= 15 and ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id",
- "Table": "order_line1, stock1",
- "Values": [
- "INT64(12)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1",
- "main.stock1"
- ]
-}
-
-# TPC-C select distinct order_line1
-"SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id \u003c 500 AND ol_o_id \u003e= 56"
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id \u003c 500 AND ol_o_id \u003e= 56",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
- "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id \u003c 500 and ol_o_id \u003e= 56",
- "Table": "order_line1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id \u003c 500 AND ol_o_id \u003e= 56",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select ol_i_id from order_line1 where 1 != 1",
- "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id \u003c 500 and ol_o_id \u003e= 56",
- "Table": "order_line1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C
-"SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity \u003c 1000"
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity \u003c 1000",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from stock1 where 1 != 1",
- "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity \u003c 1000",
- "Table": "stock1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity \u003c 1000",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*) from stock1 where 1 != 1",
- "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity \u003c 1000",
- "Table": "stock1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.stock1"
- ]
-}
-
-# TPC-C select with subquery,aggr,distinct,having,limit
-"select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) \u003e 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1"
-{
- "QueryType": "SELECT",
- "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) \u003e 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "o_o_c_id": 3,
- "o_o_d_id": 1,
- "o_o_w_id": 2
- },
- "TableName": "orders1_orders1",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o where 1 != 1",
- "Query": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o",
- "Table": "orders1"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
- "Query": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) \u003e 1 limit 1) as t where t.o_w_id = :o_o_w_id and t.o_d_id = :o_o_d_id and t.o_c_id = :o_o_c_id",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) \u003e 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1",
- "Query": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id \u003e 2100 and o_id \u003c 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) \u003e 1 limit 1) as t where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C delete order_line1
-"DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
- "Table": "order_line1",
- "Values": [
- "INT64(178)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84",
- "Table": "order_line1",
- "Values": [
- "INT64(178)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.order_line1"
- ]
-}
-
-# TPC-C delete orders1
-"DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384",
- "Table": "orders1",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.orders1"
- ]
-}
-
-# TPC-C delete history1
-"DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10"
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
- "Table": "history1",
- "Values": [
- "INT64(75)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.history1"
- ]
-}
-{
- "QueryType": "DELETE",
- "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10",
- "Instructions": {
- "OperatorType": "Delete",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "TargetTabletType": "PRIMARY",
- "MultiShardAutocommit": false,
- "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10",
- "Table": "history1",
- "Values": [
- "INT64(75)"
- ],
- "Vindex": "hash"
- },
- "TablesUsed": [
- "main.history1"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
new file mode 100644
index 00000000000..90ac48623f3
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json
@@ -0,0 +1,1495 @@
+[
+ {
+ "comment": "TPC-H query 1",
+ "query": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus",
+ "v3-plan": "unsupported: in scatter query: complex aggregate expression",
+ "gen4-plan": "unsupported: in scatter query: aggregation function 'avg'"
+ },
+ {
+ "comment": "TPC-H query 2",
+ "query": "select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment from part, supplier, partsupp, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and p_size = 15 and p_type like '%BRASS' and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' and ps_supplycost = ( select min(ps_supplycost) from partsupp, supplier, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' ) order by s_acctbal desc, n_name, s_name, p_partkey limit 10",
+ "v3-plan": "symbol p_partkey not found",
+ "gen4-plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "TPC-H query 3",
+ "query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 DESC, (2|5) ASC",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS revenue",
+ "GroupBy": "(0|6), (2|5), (3|4)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as l_orderkey",
+ "([COLUMN 6] * COALESCE([COLUMN 7], INT64(1))) * COALESCE([COLUMN 8], INT64(1)) as revenue",
+ "[COLUMN 1] as o_orderdate",
+ "[COLUMN 2] as o_shippriority",
+ "[COLUMN 5]",
+ "[COLUMN 4]",
+ "[COLUMN 3]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,R:1,L:2,R:2,R:3,L:1,R:4,R:5",
+ "JoinVars": {
+ "l_orderkey": 0
+ },
+ "TableName": "lineitem_orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey)",
+ "Query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where l_shipdate > date('1995-03-15') group by l_orderkey, weight_string(l_orderkey)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:5,L:4,L:6,L:1,R:1",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
+ "Query": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where o_orderdate < date('1995-03-15') and o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
+ "Table": "orders",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from customer where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from customer where c_mktsegment = 'BUILDING' and c_custkey = :o_custkey group by 1",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 4",
+ "query": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority",
+ "v3-plan": "symbol o_orderkey not found in table or subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS order_count",
+ "GroupBy": "(0|2)",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 1] as o_orderpriority",
+ "[COLUMN 2] as order_count",
+ "[COLUMN 3]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "SemiJoin",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where 1 != 1 group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey)",
+ "OrderBy": "(1|3) ASC",
+ "Query": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey) order by o_orderpriority asc",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from lineitem where 1 != 1",
+ "Query": "select 1 from lineitem where l_commitdate < l_receiptdate and l_orderkey = :o_orderkey",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 5 - Gen4 produces plan but the plan output is flaky",
+ "query": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc",
+ "Instructions": {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 DESC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS revenue",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as n_name",
+ "(((([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1))) * COALESCE([COLUMN 5], INT64(1))) * COALESCE([COLUMN 6], INT64(1))) * COALESCE([COLUMN 7], INT64(1)) as revenue",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:3,L:4,L:5,L:6,R:2,R:3",
+ "JoinVars": {
+ "s_nationkey": 0
+ },
+ "TableName": "orders_customer_lineitem_supplier_nation_region",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:2,L:6,L:7,R:3,R:4",
+ "JoinVars": {
+ "c_nationkey": 1,
+ "o_orderkey": 0
+ },
+ "TableName": "orders_customer_lineitem_supplier",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:1,R:0,L:4,R:2,L:2,R:1",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
+ "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
+ "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1",
+ "JoinVars": {
+ "l_suppkey": 0
+ },
+ "TableName": "lineitem_supplier",
+ "Inputs": [
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where 1 != 1 group by l_suppkey, weight_string(l_suppkey)",
+ "Query": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where l_orderkey = :o_orderkey group by l_suppkey, weight_string(l_suppkey)",
+ "Table": "lineitem"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
+ "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey and s_nationkey = :c_nationkey group by s_nationkey, weight_string(s_nationkey)",
+ "Table": "supplier",
+ "Values": [
+ ":l_suppkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
+ "JoinVars": {
+ "n_regionkey": 0
+ },
+ "TableName": "nation_region",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where 1 != 1 group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
+ "Query": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where n_nationkey = :s_nationkey group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
+ "Table": "nation",
+ "Values": [
+ ":s_nationkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from region where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from region where r_name = 'ASIA' and r_regionkey = :n_regionkey group by 1",
+ "Table": "region",
+ "Values": [
+ ":n_regionkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.nation",
+ "main.orders",
+ "main.region",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 6",
+ "query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
+ "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS revenue",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
+ "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24",
+ "Table": "lineitem"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 7",
+ "query": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(3) AS revenue",
+ "GroupBy": "(0|6), (1|5), (2|4)",
+ "ResultColumns": 4,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 4] as supp_nation",
+ "[COLUMN 5] as cust_nation",
+ "[COLUMN 6] as l_year",
+ "(((([COLUMN 10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1))) * COALESCE([COLUMN 13], INT64(1))) * COALESCE([COLUMN 14], INT64(1))) * COALESCE([COLUMN 15], INT64(1)) as revenue",
+ "[COLUMN 9]",
+ "[COLUMN 8]",
+ "[COLUMN 7]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|16) ASC, (1|17) ASC, (2|18) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,R:0,L:4,L:5,L:9,R:1,L:10,L:14,R:2,L:15,L:16,L:17,L:18,L:19,R:3,R:4,L:20,R:5,L:21",
+ "JoinVars": {
+ "n1_n_name": 2,
+ "o_custkey": 0
+ },
+ "TableName": "lineitem_orders_supplier_nation_customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,R:1,R:2,L:2,L:3,L:5,R:3,R:4,R:5,L:6,L:8,R:6,R:7,R:8,L:9,L:10,L:11,R:9,R:10,R:11,L:12",
+ "JoinVars": {
+ "l_suppkey": 0
+ },
+ "TableName": "lineitem_orders_supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:2,L:3,L:1,R:0,L:2,L:6,R:2,L:7,L:4,R:1,L:8",
+ "JoinVars": {
+ "l_orderkey": 0
+ },
+ "TableName": "lineitem_orders",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
+ "Query": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31') group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey)",
+ "Query": "select o_custkey, count(*), weight_string(o_custkey) from orders where o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey)",
+ "Table": "orders",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,R:1,R:0,R:0,R:1,R:3,R:3,R:4,L:1,R:2,R:5",
+ "JoinVars": {
+ "s_nationkey": 0
+ },
+ "TableName": "supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
+ "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey group by s_nationkey, weight_string(s_nationkey)",
+ "Table": "supplier",
+ "Values": [
+ ":l_suppkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where 1 != 1 group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
+ "Query": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where n1.n_nationkey = :s_nationkey group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
+ "Table": "nation",
+ "Values": [
+ ":s_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1,R:3",
+ "JoinVars": {
+ "c_nationkey": 0
+ },
+ "TableName": "customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
+ "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where 1 != 1 group by cust_nation, weight_string(cust_nation)",
+ "Query": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where n2.n_nationkey = :c_nationkey and (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') group by cust_nation, weight_string(cust_nation)",
+ "Table": "nation",
+ "Values": [
+ ":c_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.nation",
+ "main.orders",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 8",
+ "query": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "TPC-H query 9",
+ "query": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "aggregation on columns from different sources not supported yet"
+ },
+ {
+ "comment": "TPC-H query 10",
+ "query": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(20)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "2 DESC",
+ "ResultColumns": 8,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(2) AS revenue",
+ "GroupBy": "(0|14), (1|13), (3|12), (6|11), (4|10), (5|9), (7|8)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as c_custkey",
+ "[COLUMN 1] as c_name",
+ "(([COLUMN 14] * COALESCE([COLUMN 15], INT64(1))) * COALESCE([COLUMN 16], INT64(1))) * COALESCE([COLUMN 17], INT64(1)) as revenue",
+ "[COLUMN 2] as c_acctbal",
+ "[COLUMN 4] as n_name",
+ "[COLUMN 5] as c_address",
+ "[COLUMN 3] as c_phone",
+ "[COLUMN 6] as c_comment",
+ "[COLUMN 13]",
+ "[COLUMN 12]",
+ "[COLUMN 11]",
+ "[COLUMN 10]",
+ "[COLUMN 9]",
+ "[COLUMN 8]",
+ "[COLUMN 7]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|7) ASC, (1|8) ASC, (2|9) ASC, (3|10) ASC, (4|11) ASC, (5|12) ASC, (6|13) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,R:2,R:3,R:4,R:5,R:6,R:7,R:8,R:9,R:10,R:11,R:12,R:13,L:3,L:4,R:14,R:15",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_lineitem_customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
+ "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where 1 != 1 group by 1",
+ "Query": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where l_returnflag = 'R' and l_orderkey = :o_orderkey group by 1",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:5,L:7,L:9,R:1,L:11,L:13,L:4,L:6,L:8,L:10,R:2,L:12,L:14,L:1,R:0",
+ "JoinVars": {
+ "c_nationkey": 0
+ },
+ "TableName": "customer_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
+ "Query": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), n_name, weight_string(n_name) from nation where 1 != 1 group by n_name, weight_string(n_name)",
+ "Query": "select count(*), n_name, weight_string(n_name) from nation where n_nationkey = :c_nationkey group by n_name, weight_string(n_name)",
+ "Table": "nation",
+ "Values": [
+ ":c_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.nation",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 11",
+ "query": "select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "TPC-H query 12",
+ "query": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(1) AS high_line_count, sum(2) AS low_line_count",
+ "GroupBy": "(0|3)",
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as l_shipmode",
+ "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as high_line_count",
+ "[COLUMN 4] * COALESCE([COLUMN 5], INT64(1)) as low_line_count",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey)",
+ "Query": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders group by o_orderkey, weight_string(o_orderkey)",
+ "Table": "orders"
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1 group by l_shipmode, weight_string(l_shipmode)",
+ "Query": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey group by l_shipmode, weight_string(l_shipmode)",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 13",
+ "query": "select c_count, count(*) as custdist from ( select c_custkey, count(o_orderkey) from customer left outer join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' group by c_custkey ) as c_orders(c_custkey, c_count) group by c_count order by custdist desc, c_count desc",
+ "plan": "using aggregation on top of a *planbuilder.orderedAggregate plan is not yet supported"
+ },
+ {
+ "comment": "TPC-H query 14",
+ "query": "select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "TPC-H query 15 view\n#\"with revenue0(supplier_no, total_revenue) as (select l_suppkey, sum(l_extendedprice * (1 - l_discount)) from lineitem where l_shipdate >= date('1996-01-01') and l_shipdate < date('1996-01-01') + interval '3' month group by l_suppkey )\"\n#\"syntax error at position 236\"\n#Gen4 plan same as above\n# TPC-H query 15",
+ "query": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
+ "Query": "select max(total_revenue) from revenue0",
+ "Table": "revenue0"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3,R:0",
+ "JoinVars": {
+ "s_suppkey": 0
+ },
+ "TableName": "supplier_revenue0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier where 1 != 1",
+ "OrderBy": "(0|4) ASC",
+ "Query": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier order by s_suppkey asc",
+ "ResultColumns": 4,
+ "Table": "supplier"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select total_revenue from revenue0 where 1 != 1",
+ "Query": "select total_revenue from revenue0 where supplier_no = :s_suppkey and total_revenue = :__sq1",
+ "Table": "revenue0",
+ "Values": [
+ ":s_suppkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "max(0) AS max(total_revenue)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
+ "Query": "select max(total_revenue) from revenue0",
+ "Table": "revenue0"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1",
+ "OrderBy": "(0|5) ASC",
+ "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where total_revenue = :__sq1 and s_suppkey = supplier_no order by s_suppkey asc",
+ "ResultColumns": 5,
+ "Table": "revenue0, supplier"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.revenue0",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 16",
+ "query": "select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand <> 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "using aggregation on top of a *planbuilder.pulloutSubquery plan is not yet supported"
+ },
+ {
+ "comment": "TPC-H query 17",
+ "query": "select sum(l_extendedprice) / 7.0 as avg_yearly from lineitem, part where p_partkey = l_partkey and p_brand = 'Brand#23' and p_container = 'MED BOX' and l_quantity < ( select 0.2 * avg(l_quantity) from lineitem where l_partkey = p_partkey )",
+ "v3-plan": "symbol p_partkey not found in table or subquery",
+ "gen4-plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "TPC-H query 18",
+ "query": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum(5) AS sum(l_quantity)",
+ "GroupBy": "(4|10), (3|9), (0|8), (1|7), (2|6)",
+ "ResultColumns": 6,
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 2] as c_name",
+ "[COLUMN 3] as c_custkey",
+ "[COLUMN 4] as o_orderkey",
+ "[COLUMN 1] as o_orderdate",
+ "[COLUMN 0] as o_totalprice",
+ "([COLUMN 10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1)) as sum(l_quantity)",
+ "[COLUMN 9]",
+ "[COLUMN 8]",
+ "[COLUMN 7]",
+ "[COLUMN 6]",
+ "[COLUMN 5]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:2,L:3,L:4,L:5,L:6,L:8,L:9,L:10,L:11,L:12,L:13,L:14,R:1",
+ "JoinVars": {
+ "o_orderkey": 0
+ },
+ "TableName": "orders_customer_lineitem",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(2|8) DESC, (3|9) ASC, (4|10) ASC, (5|11) ASC, (0|7) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:1,L:5,L:7,R:1,R:3,L:1,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0",
+ "JoinVars": {
+ "o_custkey": 0
+ },
+ "TableName": "orders_customer",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
+ "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
+ "Table": "orders",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where 1 != 1 group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
+ "Query": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where c_custkey = :o_custkey group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
+ "Table": "customer",
+ "Values": [
+ ":o_custkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "VindexLookup",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "Values": [
+ ":o_orderkey"
+ ],
+ "Vindex": "lineitem_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
+ "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
+ "Table": "lineitem_map",
+ "Values": [
+ ":l_orderkey"
+ ],
+ "Vindex": "md5"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, sum(l_quantity) from lineitem where 1 != 1 group by 1",
+ "Query": "select 1, sum(l_quantity) from lineitem where l_orderkey = :o_orderkey group by 1",
+ "Table": "lineitem"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.customer",
+ "main.lineitem",
+ "main.orders"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 19",
+ "query": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum(0) AS revenue",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as revenue"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:4,R:1",
+ "JoinVars": {
+ "l_partkey": 0,
+ "l_quantity": 1,
+ "l_shipinstruct": 3,
+ "l_shipmode": 2
+ },
+ "TableName": "lineitem_part",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem where 1 != 1 group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
+ "Query": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) from part where 1 != 1 group by 1",
+ "Query": "select 1, count(*) from part where p_partkey = :l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and :l_quantity >= 1 and :l_quantity <= 1 + 10 and p_size between 1 and 5 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and :l_quantity >= 10 and :l_quantity <= 10 + 10 and p_size between 1 and 10 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and :l_quantity >= 20 and :l_quantity <= 20 + 10 and p_size between 1 and 15 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' group by 1",
+ "Table": "part"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.part"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 20",
+ "query": "select s_name, s_address from supplier, nation where s_suppkey in ( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty > ( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year ) ) and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name",
+ "v3-plan": "symbol ps_partkey not found in table or subquery",
+ "gen4-plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "TPC-H query 21",
+ "query": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(100)",
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 DESC, (0|2) ASC",
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS numwait",
+ "GroupBy": "(0|2)",
+ "Inputs": [
+ {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "[COLUMN 0] as s_name",
+ "(([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1))) * COALESCE([COLUMN 5], INT64(1)) as numwait",
+ "[COLUMN 1]"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "(0|1) ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,R:1,L:3,L:4,R:2,R:3",
+ "JoinVars": {
+ "l1_l_suppkey": 0
+ },
+ "TableName": "lineitem_orders_supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
+ "JoinVars": {
+ "l1_l_orderkey": 0
+ },
+ "TableName": "lineitem_orders",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
+ "Query": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate > l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
+ "Table": "lineitem"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) as numwait from orders where 1 != 1 group by 1",
+ "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and o_orderkey = :l1_l_orderkey group by 1",
+ "Table": "orders",
+ "Values": [
+ ":l1_l_orderkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
+ "JoinVars": {
+ "s_nationkey": 0
+ },
+ "TableName": "supplier_nation",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
+ "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and s_suppkey = :l1_l_suppkey group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
+ "Table": "supplier",
+ "Values": [
+ ":l1_l_suppkey"
+ ],
+ "Vindex": "hash"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, count(*) as numwait from nation where 1 != 1 group by 1",
+ "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and n_nationkey = :s_nationkey group by 1",
+ "Table": "nation",
+ "Values": [
+ ":s_nationkey"
+ ],
+ "Vindex": "hash"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.lineitem",
+ "main.nation",
+ "main.orders",
+ "main.supplier"
+ ]
+ }
+ },
+ {
+ "comment": "TPC-H query 22",
+ "query": "select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal from ( select substring(c_phone from 1 for 2) as cntrycode, c_acctbal from customer where substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') and c_acctbal > ( select avg(c_acctbal) from customer where c_acctbal > 0.00 and substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') ) and not exists ( select * from orders where o_custkey = c_custkey ) ) as custsale group by cntrycode order by cntrycode",
+ "v3-plan": "symbol c_custkey not found in table or subquery",
+ "gen4-plan": "exists sub-queries are only supported with AND clause"
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.txt b/go/vt/vtgate/planbuilder/testdata/tpch_cases.txt
deleted file mode 100644
index b63c58a6c12..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.txt
+++ /dev/null
@@ -1,1475 +0,0 @@
-# TPC-H query 1
-"select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate \u003c= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus"
-"unsupported: in scatter query: complex aggregate expression"
-Gen4 error: unsupported: in scatter query: aggregation function 'avg'
-
-# TPC-H query 2
-"select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment from part, supplier, partsupp, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and p_size = 15 and p_type like '%BRASS' and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' and ps_supplycost = ( select min(ps_supplycost) from partsupp, supplier, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' ) order by s_acctbal desc, n_name, s_name, p_partkey limit 10"
-"symbol p_partkey not found"
-Gen4 error: unsupported: cross-shard correlated subquery
-
-# TPC-H query 3
-"select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003c date('1995-03-15') and l_shipdate \u003e date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003c date('1995-03-15') and l_shipdate \u003e date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 DESC, (2|5) ASC",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS revenue",
- "GroupBy": "(0|6), (2|5), (3|4)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as l_orderkey",
- "([COLUMN 6] * [COLUMN 7]) * [COLUMN 8] as revenue",
- "[COLUMN 1] as o_orderdate",
- "[COLUMN 2] as o_shippriority",
- "[COLUMN 5]",
- "[COLUMN 4]",
- "[COLUMN 3]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,R:1,L:2,R:2,R:3,L:1,R:4,R:5",
- "JoinVars": {
- "l_orderkey": 0
- },
- "TableName": "lineitem_orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey)",
- "Query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where l_shipdate \u003e date('1995-03-15') group by l_orderkey, weight_string(l_orderkey)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:5,L:4,L:6,L:1,R:1",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
- "Query": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where o_orderdate \u003c date('1995-03-15') and o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)",
- "Table": "orders",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from customer where 1 != 1 group by 1",
- "Query": "select 1, count(*) from customer where c_mktsegment = 'BUILDING' and c_custkey = :o_custkey group by 1",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 4
-"select o_orderpriority, count(*) as order_count from orders where o_orderdate \u003e= date('1993-07-01') and o_orderdate \u003c date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate \u003c l_receiptdate ) group by o_orderpriority order by o_orderpriority"
-"symbol o_orderkey not found in table or subquery"
-{
- "QueryType": "SELECT",
- "Original": "select o_orderpriority, count(*) as order_count from orders where o_orderdate \u003e= date('1993-07-01') and o_orderdate \u003c date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate \u003c l_receiptdate ) group by o_orderpriority order by o_orderpriority",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS order_count",
- "GroupBy": "(0|2)",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 1] as o_orderpriority",
- "[COLUMN 2] as order_count",
- "[COLUMN 3]"
- ],
- "Inputs": [
- {
- "OperatorType": "SemiJoin",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_lineitem",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where 1 != 1 group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey)",
- "OrderBy": "(1|3) ASC",
- "Query": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where o_orderdate \u003e= date('1993-07-01') and o_orderdate \u003c date('1993-07-01') + interval '3' month group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey) order by o_orderpriority asc",
- "Table": "orders"
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1 from lineitem where 1 != 1",
- "Query": "select 1 from lineitem where l_commitdate \u003c l_receiptdate and l_orderkey = :o_orderkey",
- "Table": "lineitem"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 5 - Gen4 produces plan but the plan output is flaky
-"select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate \u003e= date('1994-01-01') and o_orderdate \u003c date('1994-01-01') + interval '1' year group by n_name order by revenue desc"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate \u003e= date('1994-01-01') and o_orderdate \u003c date('1994-01-01') + interval '1' year group by n_name order by revenue desc",
- "Instructions": {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 DESC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS revenue",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as n_name",
- "(((([COLUMN 2] * [COLUMN 3]) * [COLUMN 4]) * [COLUMN 5]) * [COLUMN 6]) * [COLUMN 7] as revenue",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:3,L:4,L:5,L:6,R:2,R:3",
- "JoinVars": {
- "s_nationkey": 0
- },
- "TableName": "orders_customer_lineitem_supplier_nation_region",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,R:2,L:6,L:7,R:3,R:4",
- "JoinVars": {
- "c_nationkey": 1,
- "o_orderkey": 0
- },
- "TableName": "orders_customer_lineitem_supplier",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:1,R:0,L:4,R:2,L:2,R:1",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
- "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where o_orderdate \u003e= date('1994-01-01') and o_orderdate \u003c date('1994-01-01') + interval '1' year group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)",
- "Table": "orders"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
- "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1",
- "JoinVars": {
- "l_suppkey": 0
- },
- "TableName": "lineitem_supplier",
- "Inputs": [
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where 1 != 1 group by l_suppkey, weight_string(l_suppkey)",
- "Query": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where l_orderkey = :o_orderkey group by l_suppkey, weight_string(l_suppkey)",
- "Table": "lineitem"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
- "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey and s_nationkey = :c_nationkey group by s_nationkey, weight_string(s_nationkey)",
- "Table": "supplier",
- "Values": [
- ":l_suppkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
- "JoinVars": {
- "n_regionkey": 0
- },
- "TableName": "nation_region",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where 1 != 1 group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
- "Query": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where n_nationkey = :s_nationkey group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)",
- "Table": "nation",
- "Values": [
- ":s_nationkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from region where 1 != 1 group by 1",
- "Query": "select 1, count(*) from region where r_name = 'ASIA' and r_regionkey = :n_regionkey group by 1",
- "Table": "region",
- "Values": [
- ":n_regionkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.nation",
- "main.orders",
- "main.region",
- "main.supplier"
- ]
-}
-
-# TPC-H query 6
-"select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24"
-{
- "QueryType": "SELECT",
- "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
- "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Table": "lineitem"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS revenue",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1",
- "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity \u003c 24",
- "Table": "lineitem"
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem"
- ]
-}
-
-# TPC-H query 7
-"select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(3) AS revenue",
- "GroupBy": "(0|6), (1|5), (2|4)",
- "ResultColumns": 4,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 4] as supp_nation",
- "[COLUMN 5] as cust_nation",
- "[COLUMN 6] as l_year",
- "(((([COLUMN 10] * [COLUMN 11]) * [COLUMN 12]) * [COLUMN 13]) * [COLUMN 14]) * [COLUMN 15] as revenue",
- "[COLUMN 9]",
- "[COLUMN 8]",
- "[COLUMN 7]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|16) ASC, (1|17) ASC, (2|18) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,R:0,L:4,L:5,L:9,R:1,L:10,L:14,R:2,L:15,L:16,L:17,L:18,L:19,R:3,R:4,L:20,R:5,L:21",
- "JoinVars": {
- "n1_n_name": 2,
- "o_custkey": 0
- },
- "TableName": "lineitem_orders_supplier_nation_customer_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,R:1,R:2,L:2,L:3,L:5,R:3,R:4,R:5,L:6,L:8,R:6,R:7,R:8,L:9,L:10,L:11,R:9,R:10,R:11,L:12",
- "JoinVars": {
- "l_suppkey": 0
- },
- "TableName": "lineitem_orders_supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:2,L:3,L:1,R:0,L:2,L:6,R:2,L:7,L:4,R:1,L:8",
- "JoinVars": {
- "l_orderkey": 0
- },
- "TableName": "lineitem_orders",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
- "Query": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31') group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey)",
- "Query": "select o_custkey, count(*), weight_string(o_custkey) from orders where o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey)",
- "Table": "orders",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "hash"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,R:1,R:0,R:0,R:1,R:3,R:3,R:4,L:1,R:2,R:5",
- "JoinVars": {
- "s_nationkey": 0
- },
- "TableName": "supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)",
- "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey group by s_nationkey, weight_string(s_nationkey)",
- "Table": "supplier",
- "Values": [
- ":l_suppkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where 1 != 1 group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
- "Query": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where n1.n_nationkey = :s_nationkey group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)",
- "Table": "nation",
- "Values": [
- ":s_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1,R:3",
- "JoinVars": {
- "c_nationkey": 0
- },
- "TableName": "customer_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)",
- "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where 1 != 1 group by cust_nation, weight_string(cust_nation)",
- "Query": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where n2.n_nationkey = :c_nationkey and (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') group by cust_nation, weight_string(cust_nation)",
- "Table": "nation",
- "Values": [
- ":c_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.nation",
- "main.orders",
- "main.supplier"
- ]
-}
-
-# TPC-H query 8
-"select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: complex aggregate expression
-
-# TPC-H query 9
-"select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: aggregation on columns from different sources not supported yet
-
-# TPC-H query 10
-"select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003e= date('1993-10-01') and o_orderdate \u003c date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate \u003e= date('1993-10-01') and o_orderdate \u003c date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(20)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "2 DESC",
- "ResultColumns": 8,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(2) AS revenue",
- "GroupBy": "(0|14), (1|13), (3|12), (6|11), (4|10), (5|9), (7|8)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as c_custkey",
- "[COLUMN 1] as c_name",
- "(([COLUMN 14] * [COLUMN 15]) * [COLUMN 16]) * [COLUMN 17] as revenue",
- "[COLUMN 2] as c_acctbal",
- "[COLUMN 4] as n_name",
- "[COLUMN 5] as c_address",
- "[COLUMN 3] as c_phone",
- "[COLUMN 6] as c_comment",
- "[COLUMN 13]",
- "[COLUMN 12]",
- "[COLUMN 11]",
- "[COLUMN 10]",
- "[COLUMN 9]",
- "[COLUMN 8]",
- "[COLUMN 7]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|7) ASC, (1|8) ASC, (2|9) ASC, (3|10) ASC, (4|11) ASC, (5|12) ASC, (6|13) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,R:2,R:3,R:4,R:5,R:6,R:7,R:8,R:9,R:10,R:11,R:12,R:13,L:3,L:4,R:14,R:15",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_lineitem_customer_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_lineitem",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
- "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where o_orderdate \u003e= date('1993-10-01') and o_orderdate \u003c date('1993-10-01') + interval '3' month group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)",
- "Table": "orders"
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where 1 != 1 group by 1",
- "Query": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where l_returnflag = 'R' and l_orderkey = :o_orderkey group by 1",
- "Table": "lineitem"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:5,L:7,L:9,R:1,L:11,L:13,L:4,L:6,L:8,L:10,R:2,L:12,L:14,L:1,R:0",
- "JoinVars": {
- "c_nationkey": 0
- },
- "TableName": "customer_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
- "Query": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*), n_name, weight_string(n_name) from nation where 1 != 1 group by n_name, weight_string(n_name)",
- "Query": "select count(*), n_name, weight_string(n_name) from nation where n_nationkey = :c_nationkey group by n_name, weight_string(n_name)",
- "Table": "nation",
- "Values": [
- ":c_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.nation",
- "main.orders"
- ]
-}
-
-# TPC-H query 11
-"select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) \u003e ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: complex aggregate expression
-
-# TPC-H query 12
-"select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority \u003c\u003e '1-URGENT' and o_orderpriority \u003c\u003e '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate \u003c l_receiptdate and l_shipdate \u003c l_commitdate and l_receiptdate \u003e= date('1994-01-01') and l_receiptdate \u003c date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority \u003c\u003e '1-URGENT' and o_orderpriority \u003c\u003e '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate \u003c l_receiptdate and l_shipdate \u003c l_commitdate and l_receiptdate \u003e= date('1994-01-01') and l_receiptdate \u003c date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(1) AS high_line_count, sum(2) AS low_line_count",
- "GroupBy": "(0|3)",
- "ResultColumns": 3,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as l_shipmode",
- "[COLUMN 2] * [COLUMN 3] as high_line_count",
- "[COLUMN 4] * [COLUMN 5] as low_line_count",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_lineitem",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey)",
- "Query": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders group by o_orderkey, weight_string(o_orderkey)",
- "Table": "orders"
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1 group by l_shipmode, weight_string(l_shipmode)",
- "Query": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate \u003c l_receiptdate and l_shipdate \u003c l_commitdate and l_receiptdate \u003e= date('1994-01-01') and l_receiptdate \u003c date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey group by l_shipmode, weight_string(l_shipmode)",
- "Table": "lineitem"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 13
-"select c_count, count(*) as custdist from ( select c_custkey, count(o_orderkey) from customer left outer join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' group by c_custkey ) as c_orders(c_custkey, c_count) group by c_count order by custdist desc, c_count desc"
-"using aggregation on top of a *planbuilder.orderedAggregate plan is not yet supported"
-Gen4 plan same as above
-
-# TPC-H query 14
-"select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate \u003e= date('1995-09-01') and l_shipdate \u003c date('1995-09-01') + interval '1' month"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: complex aggregate expression
-
-# TPC-H query 15 view
-#"with revenue0(supplier_no, total_revenue) as (select l_suppkey, sum(l_extendedprice * (1 - l_discount)) from lineitem where l_shipdate >= date('1996-01-01') and l_shipdate < date('1996-01-01') + interval '3' month group by l_suppkey )"
-#"syntax error at position 236"
-#Gen4 plan same as above
-# TPC-H query 15
-"select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey"
-{
- "QueryType": "SELECT",
- "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
- "Query": "select max(total_revenue) from revenue0",
- "Table": "revenue0"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3,R:0",
- "JoinVars": {
- "s_suppkey": 0
- },
- "TableName": "supplier_revenue0",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier where 1 != 1",
- "OrderBy": "(0|4) ASC",
- "Query": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier order by s_suppkey asc",
- "ResultColumns": 4,
- "Table": "supplier"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select total_revenue from revenue0 where 1 != 1",
- "Query": "select total_revenue from revenue0 where supplier_no = :s_suppkey and total_revenue = :__sq1",
- "Table": "revenue0",
- "Values": [
- ":s_suppkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "max(0) AS max(total_revenue)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1",
- "Query": "select max(total_revenue) from revenue0",
- "Table": "revenue0"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1",
- "OrderBy": "(0|5) ASC",
- "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where total_revenue = :__sq1 and s_suppkey = supplier_no order by s_suppkey asc",
- "ResultColumns": 5,
- "Table": "revenue0, supplier"
- }
- ]
- },
- "TablesUsed": [
- "main.revenue0",
- "main.supplier"
- ]
-}
-
-# TPC-H query 16
-"select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand \u003c\u003e 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: using aggregation on top of a *planbuilder.pulloutSubquery plan is not yet supported
-
-# TPC-H query 17
-"select sum(l_extendedprice) / 7.0 as avg_yearly from lineitem, part where p_partkey = l_partkey and p_brand = 'Brand#23' and p_container = 'MED BOX' and l_quantity \u003c ( select 0.2 * avg(l_quantity) from lineitem where l_partkey = p_partkey )"
-"symbol p_partkey not found in table or subquery"
-Gen4 error: unsupported: cross-shard correlated subquery
-
-# TPC-H query 18
-"select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) \u003e 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) \u003e 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum(5) AS sum(l_quantity)",
- "GroupBy": "(4|10), (3|9), (0|8), (1|7), (2|6)",
- "ResultColumns": 6,
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 2] as c_name",
- "[COLUMN 3] as c_custkey",
- "[COLUMN 4] as o_orderkey",
- "[COLUMN 1] as o_orderdate",
- "[COLUMN 0] as o_totalprice",
- "([COLUMN 10] * [COLUMN 11]) * [COLUMN 12] as sum(l_quantity)",
- "[COLUMN 9]",
- "[COLUMN 8]",
- "[COLUMN 7]",
- "[COLUMN 6]",
- "[COLUMN 5]"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:2,L:3,L:4,L:5,L:6,L:8,L:9,L:10,L:11,L:12,L:13,L:14,R:1",
- "JoinVars": {
- "o_orderkey": 0
- },
- "TableName": "orders_customer_lineitem",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(2|8) DESC, (3|9) ASC, (4|10) ASC, (5|11) ASC, (0|7) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:1,L:5,L:7,R:1,R:3,L:1,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0",
- "JoinVars": {
- "o_custkey": 0
- },
- "TableName": "orders_customer",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
- "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) \u003e 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)",
- "Table": "orders",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where 1 != 1 group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
- "Query": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where c_custkey = :o_custkey group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)",
- "Table": "customer",
- "Values": [
- ":o_custkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "VindexLookup",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "Values": [
- ":o_orderkey"
- ],
- "Vindex": "lineitem_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1",
- "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals",
- "Table": "lineitem_map",
- "Values": [
- ":l_orderkey"
- ],
- "Vindex": "md5"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, sum(l_quantity) from lineitem where 1 != 1 group by 1",
- "Query": "select 1, sum(l_quantity) from lineitem where l_orderkey = :o_orderkey group by 1",
- "Table": "lineitem"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.customer",
- "main.lineitem",
- "main.orders"
- ]
-}
-
-# TPC-H query 19
-"select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity \u003e= 1 and l_quantity \u003c= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity \u003e= 10 and l_quantity \u003c= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity \u003e= 20 and l_quantity \u003c= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity \u003e= 1 and l_quantity \u003c= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity \u003e= 10 and l_quantity \u003c= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity \u003e= 20 and l_quantity \u003c= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )",
- "Instructions": {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum(0) AS revenue",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] * [COLUMN 1] as revenue"
- ],
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:4,R:1",
- "JoinVars": {
- "l_partkey": 0,
- "l_quantity": 1,
- "l_shipinstruct": 3,
- "l_shipmode": 2
- },
- "TableName": "lineitem_part",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem where 1 != 1 group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
- "Query": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) from part where 1 != 1 group by 1",
- "Query": "select 1, count(*) from part where p_partkey = :l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and :l_quantity \u003e= 1 and :l_quantity \u003c= 1 + 10 and p_size between 1 and 5 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and :l_quantity \u003e= 10 and :l_quantity \u003c= 10 + 10 and p_size between 1 and 10 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and :l_quantity \u003e= 20 and :l_quantity \u003c= 20 + 10 and p_size between 1 and 15 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' group by 1",
- "Table": "part"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.part"
- ]
-}
-
-# TPC-H query 20
-"select s_name, s_address from supplier, nation where s_suppkey in ( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty \u003e ( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and l_shipdate \u003e= date('1994-01-01') and l_shipdate \u003c date('1994-01-01') + interval '1' year ) ) and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name"
-"symbol ps_partkey not found in table or subquery"
-Gen4 error: unsupported: cross-shard correlated subquery
-
-# TPC-H query 21
-"select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate \u003e l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey \u003c\u003e l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey \u003c\u003e l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100"
-"unsupported: cross-shard query with aggregates"
-{
- "QueryType": "SELECT",
- "Original": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate \u003e l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey \u003c\u003e l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey \u003c\u003e l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(100)",
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "1 DESC, (0|2) ASC",
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "sum_count_star(1) AS numwait",
- "GroupBy": "(0|2)",
- "Inputs": [
- {
- "OperatorType": "Projection",
- "Expressions": [
- "[COLUMN 0] as s_name",
- "(([COLUMN 2] * [COLUMN 3]) * [COLUMN 4]) * [COLUMN 5] as numwait",
- "[COLUMN 1]"
- ],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "(0|1) ASC",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,R:1,L:3,L:4,R:2,R:3",
- "JoinVars": {
- "l1_l_suppkey": 0
- },
- "TableName": "lineitem_orders_supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1",
- "JoinVars": {
- "l1_l_orderkey": 0
- },
- "TableName": "lineitem_orders",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
- "Query": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate \u003e l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)",
- "Table": "lineitem"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) as numwait from orders where 1 != 1 group by 1",
- "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) and o_orderkey = :l1_l_orderkey group by 1",
- "Table": "orders",
- "Values": [
- ":l1_l_orderkey"
- ],
- "Vindex": "hash"
- }
- ]
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:3,L:4,L:1,R:1",
- "JoinVars": {
- "s_nationkey": 0
- },
- "TableName": "supplier_nation",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
- "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) and s_suppkey = :l1_l_suppkey group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)",
- "Table": "supplier",
- "Values": [
- ":l1_l_suppkey"
- ],
- "Vindex": "hash"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "main",
- "Sharded": true
- },
- "FieldQuery": "select 1, count(*) as numwait from nation where 1 != 1 group by 1",
- "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate \u003e l3.l_commitdate limit 1) and n_nationkey = :s_nationkey group by 1",
- "Table": "nation",
- "Values": [
- ":s_nationkey"
- ],
- "Vindex": "hash"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.lineitem",
- "main.nation",
- "main.orders",
- "main.supplier"
- ]
-}
-
-# TPC-H query 22
-"select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal from ( select substring(c_phone from 1 for 2) as cntrycode, c_acctbal from customer where substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') and c_acctbal \u003e ( select avg(c_acctbal) from customer where c_acctbal \u003e 0.00 and substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') ) and not exists ( select * from orders where o_custkey = c_custkey ) ) as custsale group by cntrycode order by cntrycode"
-"symbol c_custkey not found in table or subquery"
-Gen4 error: exists sub-queries are only supported with AND clause
diff --git a/go/vt/vtgate/planbuilder/testdata/transaction_cases.json b/go/vt/vtgate/planbuilder/testdata/transaction_cases.json
new file mode 100644
index 00000000000..b36709cf12b
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/transaction_cases.json
@@ -0,0 +1,58 @@
+[
+ {
+ "comment": "Begin",
+ "query": "begin",
+ "plan": {
+ "QueryType": "BEGIN",
+ "Original": "begin"
+ }
+ },
+ {
+ "comment": "Start Transaction",
+ "query": "start transaction",
+ "plan": {
+ "QueryType": "BEGIN",
+ "Original": "start transaction"
+ }
+ },
+ {
+ "comment": "Commit",
+ "query": "commit",
+ "plan": {
+ "QueryType": "COMMIT",
+ "Original": "commit"
+ }
+ },
+ {
+ "comment": "Rollback",
+ "query": "rollback",
+ "plan": {
+ "QueryType": "ROLLBACK",
+ "Original": "rollback"
+ }
+ },
+ {
+ "comment": "Savepoint",
+ "query": "savepoint a",
+ "plan": {
+ "QueryType": "SAVEPOINT",
+ "Original": "savepoint a"
+ }
+ },
+ {
+ "comment": "Savepoint rollback",
+ "query": "rollback work to savepoint a",
+ "plan": {
+ "QueryType": "SAVEPOINT_ROLLBACK",
+ "Original": "rollback work to savepoint a"
+ }
+ },
+ {
+ "comment": "Savepoint release",
+ "query": "release savepoint a",
+ "plan": {
+ "QueryType": "RELEASE",
+ "Original": "release savepoint a"
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt b/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt
deleted file mode 100644
index 68be3ff6d8e..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/transaction_cases.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-# Begin
-"begin"
-{
- "QueryType": "BEGIN",
- "Original": "begin"
-}
-Gen4 plan same as above
-
-# Start Transaction
-"start transaction"
-{
- "QueryType": "BEGIN",
- "Original": "start transaction"
-}
-Gen4 plan same as above
-
-# Commit
-"commit"
-{
- "QueryType": "COMMIT",
- "Original": "commit"
-}
-Gen4 plan same as above
-
-# Rollback
-"rollback"
-{
- "QueryType": "ROLLBACK",
- "Original": "rollback"
-}
-Gen4 plan same as above
-
-# Savepoint
-"savepoint a"
-{
- "QueryType": "SAVEPOINT",
- "Original": "savepoint a"
-}
-Gen4 plan same as above
-
-# Savepoint rollback
-"rollback work to savepoint a"
-{
- "QueryType": "SAVEPOINT_ROLLBACK",
- "Original": "rollback work to savepoint a"
-}
-Gen4 plan same as above
-
-# Savepoint release
-"release savepoint a"
-{
- "QueryType": "RELEASE",
- "Original": "release savepoint a"
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json
new file mode 100644
index 00000000000..3cdb8d93ebb
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json
@@ -0,0 +1,2508 @@
+[
+ {
+ "comment": "union all between two scatter selects",
+ "query": "select id from user union all select id from music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union all select id from music",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
+ "Query": "select id from `user` union all select id from music",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union all select id from music",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
+ "Query": "select id from `user` union all select id from music",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union distinct between two scatter selects",
+ "query": "select id from user union select id from music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select id from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select id from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union all between two SelectEqualUnique",
+ "query": "select id from user where id = 1 union all select id from user where id = 5",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union all select id from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union all select id from user where id = 5",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 5",
+ "Table": "`user`",
+ "Values": [
+ "INT64(5)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "almost dereks query - two queries with order by and limit being scattered to two different sets of tablets",
+ "query": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(1)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Union all",
+ "query": "select col1, col2 from user union all select col1, col2 from user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
+ "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
+ "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union operations in subqueries (FROM)",
+ "query": "select * from (select * from user union all select * from user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select * from user union all select * from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select * from (select * from `user` union all select * from user_extra) as t",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from (select * from user union all select * from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select * from (select * from `user` union all select * from user_extra) as t",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union operations in derived table, without star expression (FROM)¡",
+ "query": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
+ "Table": "`user`"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
+ "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
+ "Table": "`user`"
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union all between two scatter selects, with order by",
+ "query": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union all on scatter and single route",
+ "query": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
+ "Instructions": {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union of information_schema with normal table",
+ "query": "select * from information_schema.a union select * from unsharded",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a union select * from unsharded",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select * from information_schema.a",
+ "Table": "information_schema.a"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from information_schema.a union select * from unsharded",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select distinct * from information_schema.a",
+ "Table": "information_schema.a"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select distinct * from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "union of information_schema with normal table",
+ "query": "select * from unsharded union select * from information_schema.a",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded union select * from information_schema.a",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select * from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select * from information_schema.a",
+ "Table": "information_schema.a"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from unsharded union select * from information_schema.a",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from unsharded where 1 != 1",
+ "Query": "select distinct * from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "DBA",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from information_schema.a where 1 != 1",
+ "Query": "select distinct * from information_schema.a",
+ "Table": "information_schema.a"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "information_schema.a",
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "multi-shard union",
+ "query": "(select id from user union select id from music) union select 1 from dual",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user union select id from music) union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from music where 1 != 1",
+ "Query": "select id from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1",
+ "Query": "select 1 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user union select id from music) union select 1 from dual",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1, weight_string(1) from dual where 1 != 1",
+ "Query": "select distinct 1, weight_string(1) from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multi-shard union",
+ "query": "select 1 from music union (select id from user union all select name from unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music union (select id from user union all select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `name` from unsharded where 1 != 1",
+ "Query": "select `name` from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": "nesting of unions at the right-hand side is not yet supported"
+ },
+ {
+ "comment": "multi-shard union",
+ "query": "select 1 from music union (select id from user union select name from unsharded)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music union (select id from user union select name from unsharded)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select `name` from unsharded where 1 != 1",
+ "Query": "select `name` from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": "nesting of unions at the right-hand side is not yet supported"
+ },
+ {
+ "comment": "union with the same target shard because of vindex",
+ "query": "select * from music where id = 1 union select * from user where id = 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select * from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select * from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from music where id = 1 union select * from user where id = 1",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from music where 1 != 1",
+ "Query": "select distinct * from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select * from `user` where 1 != 1",
+ "Query": "select distinct * from `user` where id = 1",
+ "Table": "`user`",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union with different target shards",
+ "query": "select 1 from music where id = 1 union select 1 from music where id = 2",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select 1 from music where id = 2",
+ "Table": "music",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "0: binary"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select distinct 1 from music where id = 1",
+ "Table": "music",
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "music_user_map"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from music where 1 != 1",
+ "Query": "select distinct 1 from music where id = 2",
+ "Table": "music",
+ "Values": [
+ "INT64(2)"
+ ],
+ "Vindex": "music_user_map"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music"
+ ]
+ }
+ },
+ {
+ "comment": "multiple select statements have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to lose it with UNION DISTINCT",
+ "query": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by 1 desc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by 1 asc",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "(select id, weight_string(id) from `user` where 1 != 1) union (select id, weight_string(id) from `user` where 1 != 1)",
+ "OrderBy": "(0|1) DESC",
+ "Query": "(select id, weight_string(id) from `user` order by id desc) union (select id, weight_string(id) from `user` order by id asc)",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "multiple unions",
+ "query": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
+ "Query": "select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 2.0 from `user` where 1 != 1",
+ "Query": "select 2.0 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "0: binary"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from dual where 1 != 1 union all select null from dual where 1 != 1 union all select 1.0 from dual where 1 != 1 union all select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
+ "Query": "select 1 from dual union all select null from dual union all select 1.0 from dual union all select '1' from dual union select 2 from dual",
+ "Table": "dual"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 2.0 from `user` where 1 != 1",
+ "Query": "select distinct 2.0 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "union distinct between a scatter query and a join (other side)",
+ "query": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name` from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
+ "Query": "select 'b', 'c' from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:2)",
+ "(1:3)"
+ ],
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
+ "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "union distinct between a scatter query and a join (other side)",
+ "query": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
+ "Query": "select 'b', 'c' from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name` from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:2)",
+ "(1:3)"
+ ],
+ "ResultColumns": 2,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
+ "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
+ "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "unmergeable because we are using aggregation",
+ "query": "select count(*) as s from user union select count(*) as s from music",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) as s from user union select count(*) as s from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from `user` where 1 != 1",
+ "Query": "select count(*) as s from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count(0) AS count",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from music where 1 != 1",
+ "Query": "select count(*) as s from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select count(*) as s from user union select count(*) as s from music",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "0: binary"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from `user` where 1 != 1",
+ "Query": "select count(*) as s from `user`",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Scalar",
+ "Aggregates": "sum_count_star(0) AS s",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*) as s from music where 1 != 1",
+ "Query": "select count(*) as s from music",
+ "Table": "music"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Union in derived table with first SELECT being an UNION",
+ "query": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id + 1 from `user` where 1 != 1",
+ "Query": "select id + 1 from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_id from user_extra where 1 != 1",
+ "Query": "select user_id from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union all select id + 1, weight_string(id + 1) from `user` where 1 != 1 union select user_id, weight_string(user_id) from user_extra where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union all select id + 1, weight_string(id + 1) from `user` union select user_id, weight_string(user_id) from user_extra",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "gen4 optimises away ORDER BY when it's safe to do",
+ "query": "(select id from user union select id from music order by id) union select 1 from unsharded",
+ "v3-plan": "can't do ORDER BY on top of UNION",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select id from user union select id from music order by id) union select 1 from unsharded",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1, weight_string(1) from unsharded where 1 != 1",
+ "Query": "select distinct 1, weight_string(1) from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.music",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "push down the ::upper_limit to the sources, since we are doing DISTINCT on them, it's safe",
+ "query": "select id from user union select 3 limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select 3 limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 3 from dual where 1 != 1",
+ "Query": "select 3 from dual",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user union select 3 limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "Query": "select distinct id, weight_string(id) from `user` limit :__upper_limit",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Reference",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 3, weight_string(3) from dual where 1 != 1",
+ "Query": "select distinct 3, weight_string(3) from dual limit :__upper_limit",
+ "Table": "dual"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.dual",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "silly query that should be collapsed into a single unsharded UNION route",
+ "query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
+ "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
+ "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "UNION that needs to be reordered to be merged more aggressively. Gen4 is able to get it down to 2 routes",
+ "query": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col from unsharded where 1 != 1",
+ "Query": "select col from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user`",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col2 from unsharded where 1 != 1",
+ "Query": "select col2 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from user_extra where 1 != 1",
+ "Query": "select col from user_extra",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select col, weight_string(col) from unsharded where 1 != 1 union select col2, weight_string(col2) from unsharded where 1 != 1",
+ "Query": "select col, weight_string(col) from unsharded union select col2, weight_string(col2) from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select col, weight_string(col) from user_extra where 1 != 1",
+ "Query": "select id, weight_string(id) from `user` union select col, weight_string(col) from user_extra",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with union",
+ "query": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "tbl1_id": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
+ "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
+ "Table": "`user`",
+ "Values": [
+ ":tbl1_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "tbl1_id": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 0
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) ASC",
+ "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(5)",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
+ "OrderBy": "(0|1) DESC",
+ "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
+ "ResultColumns": 1,
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
+ "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
+ "Table": "`user`",
+ "Values": [
+ ":tbl1_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous LIMIT",
+ "query": "select id from user limit 1 union all select id from music limit 1",
+ "plan": "syntax error at position 34 near 'union'"
+ },
+ {
+ "comment": "ambiguous ORDER BY",
+ "query": "select id from user order by id union all select id from music order by id desc",
+ "plan": "syntax error at position 38 near 'union'"
+ },
+ {
+ "comment": "different number of columns",
+ "query": "select id, 42 from user where id = 1 union all select id from user where id = 5",
+ "v3-plan": "The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5",
+ "gen4-plan": "The used SELECT statements have a different number of columns"
+ },
+ {
+ "comment": "union with invalid order by clause with table qualifier",
+ "query": "select id from user union select 3 order by user.id",
+ "v3-plan": "can't do ORDER BY on top of UNION",
+ "gen4-plan": "Table 'user' from one of the SELECTs cannot be used in global ORDER clause"
+ },
+ {
+ "comment": "union with invalid order by clause with table qualifier",
+ "query": "select id from user union select 3 order by id",
+ "plan": "can't do ORDER BY on top of UNION"
+ },
+ {
+ "comment": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
+ "query": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
+ "v3-plan": "unsupported: expression on results of a cross-shard subquery",
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
+ "Instructions": {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
+ 2
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:1)"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id + 42 as foo, weight_string(id + 42), 1 from `user` where 1 != 1",
+ "Query": "select distinct id + 42 as foo, weight_string(id + 42), 1 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 + id as foo, weight_string(1 + id), 1 from unsharded where 1 != 1",
+ "Query": "select distinct 1 + id as foo, weight_string(1 + id), 1 from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "unknown columns are OK as long as the whole query is unsharded",
+ "query": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select * from unsharded where 1 != 1) as last_failed where 1 != 1 union all select * from (select * from unsharded where 1 != 1) as last_succeeded where 1 != 1",
+ "Query": "select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'FAILED' order by buildNumber desc limit 1) as last_failed union all select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'SUCCEEDED' order by buildNumber desc limit 1) as last_succeeded order by buildNumber desc limit 1",
+ "Table": "unsharded"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select * from (select * from unsharded where 1 != 1) as last_failed where 1 != 1 union all select * from (select * from unsharded where 1 != 1) as last_succeeded where 1 != 1",
+ "Query": "select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'FAILED' order by buildNumber desc limit 1) as last_failed union all select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'SUCCEEDED' order by buildNumber desc limit 1) as last_succeeded order by buildNumber desc limit 1",
+ "Table": "unsharded"
+ },
+ "TablesUsed": [
+ "main.unsharded"
+ ]
+ }
+ },
+ {
+ "comment": "union of unsharded route with sharded join with involvement of weight string",
+ "query": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, foo, bar from unsharded where 1 != 1",
+ "Query": "select id, foo, bar from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "TableName": "`user`_authoritative",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.intcol, `user`.textcol2 from `user` where 1 != 1",
+ "Query": "select `user`.intcol, `user`.textcol2 from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select authoritative.col2 from authoritative where 1 != 1",
+ "Query": "select authoritative.col2 from authoritative",
+ "Table": "authoritative"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative",
+ "Instructions": {
+ "OperatorType": "Distinct",
+ "Collations": [
+ "(0:3)",
+ "(1:4)",
+ "(2:5)"
+ ],
+ "ResultColumns": 3,
+ "Inputs": [
+ {
+ "OperatorType": "Concatenate",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded where 1 != 1",
+ "Query": "select distinct id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0,L:2,L:3,R:1",
+ "TableName": "`user`_authoritative",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user` where 1 != 1",
+ "Query": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select authoritative.col2, weight_string(authoritative.col2) from authoritative where 1 != 1",
+ "Query": "select authoritative.col2, weight_string(authoritative.col2) from authoritative",
+ "Table": "authoritative"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.authoritative",
+ "user.user"
+ ]
+ }
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.txt b/go/vt/vtgate/planbuilder/testdata/union_cases.txt
deleted file mode 100644
index abf1f20f7b9..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/union_cases.txt
+++ /dev/null
@@ -1,2310 +0,0 @@
-# union all between two scatter selects
-"select id from user union all select id from music"
-{
- "QueryType": "SELECT",
- "Original": "select id from user union all select id from music",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
- "Query": "select id from `user` union all select id from music",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user union all select id from music",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1",
- "Query": "select id from `user` union all select id from music",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union distinct between two scatter selects
-"select id from user union select id from music"
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select id from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select id from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union all between two SelectEqualUnique
-"select id from user where id = 1 union all select id from user where id = 5"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union all select id from user where id = 5",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union all select id from user where id = 5",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 5",
- "Table": "`user`",
- "Values": [
- "INT64(5)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-#almost dereks query - two queries with order by and limit being scattered to two different sets of tablets
-"(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)"
-{
- "QueryType": "SELECT",
- "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(1)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Union all
-"select col1, col2 from user union all select col1, col2 from user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
- "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1, col2 from user union all select col1, col2 from user_extra",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1",
- "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union operations in subqueries (FROM)
-"select * from (select * from user union all select * from user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select * from (select * from user union all select * from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select * from (select * from `user` union all select * from user_extra) as t",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from (select * from user union all select * from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select * from (select * from `user` union all select * from user_extra) as t",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union operations in derived table, without star expression (FROM)¡
-"select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
- "Table": "`user`"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1",
- "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t",
- "Table": "`user`"
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union all between two scatter selects, with order by
-"(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from music where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "music"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union all on scatter and single route
-"select id from user where id = 1 union select id from user where id = 1 union all select id from user"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user",
- "Instructions": {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1",
- "Query": "select id from `user` where id = 1 union select id from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# union of information_schema with normal table
-"select * from information_schema.a union select * from unsharded"
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a union select * from unsharded",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from information_schema.a union select * from unsharded",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select distinct * from information_schema.a",
- "Table": "information_schema.a"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select distinct * from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# union of information_schema with normal table
-"select * from unsharded union select * from information_schema.a"
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded union select * from information_schema.a",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select * from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select * from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from unsharded union select * from information_schema.a",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from unsharded where 1 != 1",
- "Query": "select distinct * from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "DBA",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select * from information_schema.a where 1 != 1",
- "Query": "select distinct * from information_schema.a",
- "Table": "information_schema.a"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# multi-shard union
-"(select id from user union select id from music) union select 1 from dual"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user union select id from music) union select 1 from dual",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from music where 1 != 1",
- "Query": "select id from music",
- "Table": "music"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1",
- "Query": "select 1 from dual",
- "Table": "dual"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user union select id from music) union select 1 from dual",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1, weight_string(1) from dual where 1 != 1",
- "Query": "select distinct 1, weight_string(1) from dual",
- "Table": "dual"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.music",
- "user.user"
- ]
-}
-
-# multi-shard union
-"select 1 from music union (select id from user union all select name from unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music union (select id from user union all select name from unsharded)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- },
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select `name` from unsharded where 1 != 1",
- "Query": "select `name` from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-Gen4 error: nesting of unions at the right-hand side is not yet supported
-
-# multi-shard union
-"select 1 from music union (select id from user union select name from unsharded)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music union (select id from user union select name from unsharded)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music",
- "Table": "music"
- },
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select `name` from unsharded where 1 != 1",
- "Query": "select `name` from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
-}
-Gen4 error: nesting of unions at the right-hand side is not yet supported
-
-# union with the same target shard because of vindex
-"select * from music where id = 1 union select * from user where id = 1"
-{
- "QueryType": "SELECT",
- "Original": "select * from music where id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select * from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select * from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from music where id = 1 union select * from user where id = 1",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from music where 1 != 1",
- "Query": "select distinct * from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select * from `user` where 1 != 1",
- "Query": "select distinct * from `user` where id = 1",
- "Table": "`user`",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# union with different target shards
-"select 1 from music where id = 1 union select 1 from music where id = 2"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select 1 from music where id = 2",
- "Table": "music",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from music where id = 1 union select 1 from music where id = 2",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "0: binary"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select distinct 1 from music where id = 1",
- "Table": "music",
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "music_user_map"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from music where 1 != 1",
- "Query": "select distinct 1 from music where id = 2",
- "Table": "music",
- "Values": [
- "INT64(2)"
- ],
- "Vindex": "music_user_map"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music"
- ]
-}
-
-# multiple select statement have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to loose it with UNION DISTINCT
-"(select id from user order by 1 desc) union (select id from user order by 1 asc)"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by 1 desc",
- "ResultColumns": 1,
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by 1 asc",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "(select id, weight_string(id) from `user` where 1 != 1) union (select id, weight_string(id) from `user` where 1 != 1)",
- "OrderBy": "(0|1) DESC",
- "Query": "(select id, weight_string(id) from `user` order by id desc) union (select id, weight_string(id) from `user` order by id asc)",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# multiple unions
-"select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user"
-{
- "QueryType": "SELECT",
- "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
- "Query": "select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 2.0 from `user` where 1 != 1",
- "Query": "select 2.0 from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "0: binary"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from dual where 1 != 1 union all select null from dual where 1 != 1 union all select 1.0 from dual where 1 != 1 union all select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1",
- "Query": "select 1 from dual union all select null from dual union all select 1.0 from dual union all select '1' from dual union select 2 from dual",
- "Table": "dual"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 2.0 from `user` where 1 != 1",
- "Query": "select distinct 2.0 from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# union distinct between a scatter query and a join (other side)
-"(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user"
-{
- "QueryType": "SELECT",
- "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name` from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
- "Query": "select 'b', 'c' from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:2)",
- "(1:3)"
- ],
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
- "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# union distinct between a scatter query and a join (other side)
-"select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')"
-{
- "QueryType": "SELECT",
- "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c' from `user` where 1 != 1",
- "Query": "select 'b', 'c' from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name` from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:2)",
- "(1:3)"
- ],
- "ResultColumns": 2,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1",
- "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,L:2,L:3",
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1",
- "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from user_extra where 1 != 1",
- "Query": "select 1 from user_extra where user_extra.extra = 'asdf'",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# unmergable because we are using aggregation
-"select count(*) as s from user union select count(*) as s from music"
-{
- "QueryType": "SELECT",
- "Original": "select count(*) as s from user union select count(*) as s from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from `user` where 1 != 1",
- "Query": "select count(*) as s from `user`",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count(0) AS count",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from music where 1 != 1",
- "Query": "select count(*) as s from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select count(*) as s from user union select count(*) as s from music",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "0: binary"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from `user` where 1 != 1",
- "Query": "select count(*) as s from `user`",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Scalar",
- "Aggregates": "sum_count_star(0) AS s",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*) as s from music where 1 != 1",
- "Query": "select count(*) as s from music",
- "Table": "music"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.music",
- "user.user"
- ]
-}
-
-# Union in derived table with first SELECT being an UNION
-"select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t"
-{
- "QueryType": "SELECT",
- "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id + 1 from `user` where 1 != 1",
- "Query": "select id + 1 from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select user_id from user_extra where 1 != 1",
- "Query": "select user_id from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union all select id + 1, weight_string(id + 1) from `user` where 1 != 1 union select user_id, weight_string(user_id) from user_extra where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union all select id + 1, weight_string(id + 1) from `user` union select user_id, weight_string(user_id) from user_extra",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# gen4 optimises away ORDER BY when it's safe to do
-"(select id from user union select id from music order by id) union select 1 from unsharded"
-"can't do ORDER BY on top of UNION"
-{
- "QueryType": "SELECT",
- "Original": "(select id from user union select id from music order by id) union select 1 from unsharded",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1, weight_string(1) from unsharded where 1 != 1",
- "Query": "select distinct 1, weight_string(1) from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.music",
- "user.user"
- ]
-}
-
-# push down the ::upper_limit to the sources, since we are doing DISTINCT on them, it's safe
-"select id from user union select 3 limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select 3 limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 3 from dual where 1 != 1",
- "Query": "select 3 from dual",
- "Table": "dual"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user union select 3 limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "Query": "select distinct id, weight_string(id) from `user` limit :__upper_limit",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Reference",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 3, weight_string(3) from dual where 1 != 1",
- "Query": "select distinct 3, weight_string(3) from dual limit :__upper_limit",
- "Table": "dual"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.dual",
- "user.user"
- ]
-}
-
-# silly query that should be collapsed into a single unsharded UNION route
-"(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1"
-{
- "QueryType": "SELECT",
- "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
- "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
- "Table": "unsharded"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1",
- "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc",
- "Table": "unsharded"
- },
- "TablesUsed": [
- "main.unsharded"
- ]
-}
-
-# UNION that needs to be reordered to be merged more aggressively. Gen4 is able to get it down to 2 routes
-"select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra"
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
- "Instructions": {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col from unsharded where 1 != 1",
- "Query": "select col from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user`",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col2 from unsharded where 1 != 1",
- "Query": "select col2 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from user_extra where 1 != 1",
- "Query": "select col from user_extra",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra",
- "Instructions": {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "ResultColumns": 1,
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select col, weight_string(col) from unsharded where 1 != 1 union select col2, weight_string(col2) from unsharded where 1 != 1",
- "Query": "select col, weight_string(col) from unsharded union select col2, weight_string(col2) from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select col, weight_string(col) from user_extra where 1 != 1",
- "Query": "select id, weight_string(id) from `user` union select col, weight_string(col) from user_extra",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user",
- "user.user_extra"
- ]
-}
-
-# derived table with union
-"select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id"
-{
- "QueryType": "SELECT",
- "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "tbl1_id": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
- "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
- "Table": "`user`",
- "Values": [
- ":tbl1_id"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "tbl1_id": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 0
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) ASC",
- "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Limit",
- "Count": "INT64(5)",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1",
- "OrderBy": "(0|1) DESC",
- "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit",
- "ResultColumns": 1,
- "Table": "`user`"
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1",
- "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id",
- "Table": "`user`",
- "Values": [
- ":tbl1_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# ambiguous LIMIT
-"select id from user limit 1 union all select id from music limit 1"
-"syntax error at position 34 near 'union'"
-Gen4 plan same as above
-
-# ambiguous ORDER BY
-"select id from user order by id union all select id from music order by id desc"
-"syntax error at position 38 near 'union'"
-Gen4 plan same as above
-
-# different number of columns
-"select id, 42 from user where id = 1 union all select id from user where id = 5"
-"The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5"
-Gen4 error: The used SELECT statements have a different number of columns
-
-# union with invalid order by clause with table qualifier
-"select id from user union select 3 order by user.id"
-"can't do ORDER BY on top of UNION"
-Gen4 error: Table 'user' from one of the SELECTs cannot be used in global ORDER clause
-
-# union with invalid order by clause with table qualifier
-"select id from user union select 3 order by id"
-"can't do ORDER BY on top of UNION"
-Gen4 plan same as above
-
-"select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t"
-"unsupported: expression on results of a cross-shard subquery"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t",
- "Instructions": {
- "OperatorType": "SimpleProjection",
- "Columns": [
- 2
- ],
- "Inputs": [
- {
- "OperatorType": "Distinct",
- "Collations": [
- "(0:1)"
- ],
- "Inputs": [
- {
- "OperatorType": "Concatenate",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id + 42 as foo, weight_string(id + 42), 1 from `user` where 1 != 1",
- "Query": "select distinct id + 42 as foo, weight_string(id + 42), 1 from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 + id as foo, weight_string(1 + id), 1 from unsharded where 1 != 1",
- "Query": "select distinct 1 + id as foo, weight_string(1 + id), 1 from unsharded",
- "Table": "unsharded"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
new file mode 100644
index 00000000000..cb82f750ced
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
@@ -0,0 +1,482 @@
+[
+ {
+ "comment": "union operations in subqueries (expressions)",
+ "query": "select * from user where id in (select * from user union select * from user_extra)",
+ "plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "TODO: Implement support for select with a target destination",
+ "query": "select * from `user[-]`.user_metadata",
+ "plan": "unsupported: SELECT with a target destination"
+ },
+ {
+ "comment": "Unsupported INSERT statement with a target destination",
+ "query": "insert into `user[-]`.user_metadata (a, b) values (1,2)",
+ "plan": "unsupported: INSERT with a target destination"
+ },
+ {
+ "comment": "Unsupported delete statement with a replica target destination",
+ "query": "DELETE FROM `user[-]@replica`.user_metadata limit 1",
+ "plan": "unsupported: delete statement with a replica target"
+ },
+ {
+ "comment": "Unsupported update statement with a replica target destination",
+ "query": "update `user[-]@replica`.user_metadata set id=2",
+ "plan": "unsupported: update statement with a replica target"
+ },
+ {
+ "comment": "scatter order by with * expression",
+ "query": "select * from user order by id",
+ "v3-plan": "unsupported: in scatter query: order by must reference a column in the select list: id asc",
+ "gen4-plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "order by rand on a cross-shard subquery",
+ "query": "select id from (select user.id, user.col from user join user_extra) as t order by rand()",
+ "v3-plan": "unsupported: memory sort: complex order by expression: rand()",
+ "gen4-plan": "unsupported: in scatter query: complex order by expression: rand()"
+ },
+ {
+ "comment": "natural join",
+ "query": "select * from user natural join user_extra",
+ "plan": "unsupported: natural join"
+ },
+ {
+ "comment": "join with USING construct",
+ "query": "select * from user join user_extra using(id)",
+ "v3-plan": "unsupported: join with USING(column_list) clause for complex queries",
+ "gen4-plan": "can't handle JOIN USING without authoritative tables"
+ },
+ {
+ "comment": "join with USING construct with 3 tables",
+ "query": "select user.id from user join user_extra using(id) join music using(id2)",
+ "v3-plan": "unsupported: join with USING(column_list) clause for complex queries",
+ "gen4-plan": "can't handle JOIN USING without authoritative tables"
+ },
+ {
+ "comment": "natural left join",
+ "query": "select * from user natural left join user_extra",
+ "plan": "unsupported: natural left join"
+ },
+ {
+ "comment": "natural right join",
+ "query": "select * from user natural right join user_extra",
+ "plan": "unsupported: natural right join"
+ },
+ {
+    "comment": "* expression not allowed for cross-shard joins",
+ "query": "select * from user join user_extra",
+ "plan": "unsupported: '*' expression in cross-shard query"
+ },
+ {
+ "comment": "Group by column number, used with non-aliased expression (duplicated code)",
+ "query": "select * from user group by 1",
+ "v3-plan": "unsupported: '*' expression in cross-shard query",
+ "gen4-plan": "cannot use column offsets in group statement when using `*`"
+ },
+ {
+ "comment": "complex group by expression",
+ "query": "select a from user group by a+1",
+ "v3-plan": "unsupported: in scatter query: only simple references allowed",
+ "gen4-plan": "unsupported: in scatter query: complex order by expression: a + 1"
+ },
+ {
+ "comment": "Complex aggregate expression on scatter",
+ "query": "select 1+count(*) from user",
+ "plan": "unsupported: in scatter query: complex aggregate expression"
+ },
+ {
+ "comment": "Multi-value aggregates not supported",
+ "query": "select count(a,b) from user",
+ "v3-plan": "unsupported: only one expression allowed inside aggregates: count(a, b)",
+ "gen4-plan": "aggregate functions take a single argument 'count(a, b)'"
+ },
+ {
+ "comment": "scatter aggregate complex order by",
+ "query": "select id from user group by id order by id+1",
+ "plan": "unsupported: in scatter query: complex order by expression: id + 1"
+ },
+ {
+ "comment": "Scatter order by is complex with aggregates in select",
+ "query": "select col, count(*) from user group by col order by col+1",
+ "plan": "unsupported: in scatter query: complex order by expression: col + 1"
+ },
+ {
+ "comment": "Aggregate detection (group_concat)",
+ "query": "select group_concat(user.a) from user join user_extra",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "unsupported: in scatter query: aggregation function 'group_concat'"
+ },
+ {
+ "comment": "subqueries not supported in group by",
+ "query": "select id from user group by id, (select id from user_extra)",
+ "v3-plan": "unsupported: subqueries disallowed in GROUP or ORDER BY",
+ "gen4-plan": "unsupported: subqueries disallowed in GROUP BY"
+ },
+ {
+ "comment": "Order by uses cross-shard expression",
+ "query": "select id from user order by id+1",
+ "plan": "unsupported: in scatter query: complex order by expression: id + 1"
+ },
+ {
+ "comment": "Order by column number with collate",
+ "query": "select user.col1 as a from user order by 1 collate utf8_general_ci",
+ "v3-plan": "unsupported: in scatter query: complex order by expression: 1 collate utf8_general_ci",
+ "gen4-plan": "unsupported: in scatter query: complex order by expression: a collate utf8_general_ci"
+ },
+ {
+ "comment": "subqueries in delete",
+ "query": "delete from user where col = (select id from unsharded)",
+ "plan": "unsupported: subqueries in sharded DML"
+ },
+ {
+ "comment": "sharded subqueries in unsharded delete",
+ "query": "delete from unsharded where col = (select id from user)",
+ "plan": "unsupported: sharded subqueries in DML"
+ },
+ {
+    "comment": "sharded delete with limit clause",
+ "query": "delete from user_extra limit 10",
+ "plan": "multi shard delete with limit is not supported"
+ },
+ {
+ "comment": "sharded subquery in unsharded subquery in unsharded delete",
+ "query": "delete from unsharded where col = (select id from unsharded where id = (select id from user))",
+ "plan": "unsupported: sharded subqueries in DML"
+ },
+ {
+ "comment": "sharded join unsharded subqueries in unsharded delete",
+ "query": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)",
+ "plan": "unsupported: sharded subqueries in DML"
+ },
+ {
+ "comment": "scatter update with limit clause",
+ "query": "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1",
+ "plan": "multi shard update with limit is not supported"
+ },
+ {
+ "comment": "multi delete multi table",
+ "query": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'",
+ "plan": "unsupported: multi-shard or vindex write statement"
+ },
+ {
+ "comment": "update changes primary vindex column",
+ "query": "update user set id = 1 where id = 1",
+ "plan": "unsupported: You can't update primary vindex columns. Invalid update on vindex: user_index"
+ },
+ {
+ "comment": "update change in multicol vindex column",
+ "query": "update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2",
+ "plan": "unsupported: You can't update primary vindex columns. Invalid update on vindex: multicolIdx"
+ },
+ {
+ "comment": "update changes non lookup vindex column",
+ "query": "update user_metadata set md5 = 1 where user_id = 1",
+ "plan": "unsupported: You can only update lookup vindexes. Invalid update on vindex: user_md5_index"
+ },
+ {
+ "comment": "update with complex set clause",
+ "query": "update music set id = id + 1 where id = 1",
+ "plan": "unsupported: Only values are supported. Invalid update on column: `id` with expr: [id + 1]"
+ },
+ {
+ "comment": "update by primary keyspace id, changing one vindex column, limit without order clause",
+ "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10",
+ "plan": "unsupported: Need to provide order by clause when using limit. Invalid update on vindex: email_user_map"
+ },
+ {
+ "comment": "update with derived table",
+ "query": "update (select id from user) as u set id = 4",
+ "v3-plan": "unsupported: subqueries in sharded DML",
+ "gen4-plan": "The target table u of the UPDATE is not updatable"
+ },
+ {
+ "comment": "join in update tables",
+ "query": "update user join user_extra on user.id = user_extra.id set user.name = 'foo'",
+ "v3-plan": "unsupported: multi-shard or vindex write statement",
+ "gen4-plan": "unsupported: multiple tables in update"
+ },
+ {
+ "comment": "multiple tables in update",
+ "query": "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id",
+ "v3-plan": "unsupported: multi-shard or vindex write statement",
+ "gen4-plan": "unsupported: multiple tables in update"
+ },
+ {
+ "comment": "unsharded insert, unqualified names and auto-inc combined",
+ "query": "insert into unsharded_auto select col from unsharded",
+ "plan": "unsupported: auto-inc and select in insert"
+ },
+ {
+ "comment": "unsharded insert, no col list with auto-inc",
+ "query": "insert into unsharded_auto values(1,1)",
+ "plan": "column list required for tables with auto-inc columns"
+ },
+ {
+ "comment": "unsharded insert, col list does not match values",
+ "query": "insert into unsharded_auto(id, val) values(1)",
+ "plan": "column list doesn't match values"
+ },
+ {
+ "comment": "sharded upsert can't change vindex",
+ "query": "insert into user(id) values(1) on duplicate key update id = 3",
+ "plan": "unsupported: DML cannot change vindex column"
+ },
+ {
+ "comment": "sharded upsert can't change vindex using values function",
+ "query": "insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(id)",
+ "plan": "unsupported: DML cannot change vindex column"
+ },
+ {
+ "comment": "sharded replace no vindex",
+ "query": "replace into user(val) values(1, 'foo')",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "sharded replace with vindex",
+ "query": "replace into user(id, name) values(1, 'foo')",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "replace no column list",
+ "query": "replace into user values(1, 2, 3)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+    "comment": "replace with mismatched column list",
+ "query": "replace into user(id) values (1, 2)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "replace with one vindex",
+ "query": "replace into user(id) values (1)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "replace with non vindex on vindex-enabled table",
+ "query": "replace into user(nonid) values (2)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "replace with all vindexes supplied",
+ "query": "replace into user(nonid, name, id) values (2, 'foo', 1)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "replace for non-vindex autoinc",
+ "query": "replace into user_extra(nonid) values (2)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "replace with multiple rows",
+ "query": "replace into user(id) values (1), (2)",
+ "plan": "unsupported: REPLACE INTO with sharded schema"
+ },
+ {
+ "comment": "select keyspace_id from user_index where id = 1 and id = 2",
+ "query": "select keyspace_id from user_index where id = 1 and id = 2",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (multiple filters)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where func(id)",
+ "query": "select keyspace_id from user_index where func(id)",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (not a comparison)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where id > 1",
+ "query": "select keyspace_id from user_index where id > 1",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (not equality)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where 1 = id",
+ "query": "select keyspace_id from user_index where 1 = id",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not a column)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where keyspace_id = 1",
+ "query": "select keyspace_id from user_index where keyspace_id = 1",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not id)"
+ },
+ {
+ "comment": "select keyspace_id from user_index where id = id+1",
+ "query": "select keyspace_id from user_index where id = id+1",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (rhs is not a value)"
+ },
+ {
+ "comment": "vindex func without where condition",
+ "query": "select keyspace_id from user_index",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (where clause missing)"
+ },
+ {
+ "comment": "vindex func in subquery without where",
+ "query": "select id from user where exists(select keyspace_id from user_index)",
+ "plan": "unsupported: where clause for vindex function must be of the form id = or id in(,...) (where clause missing)"
+ },
+ {
+ "comment": "select func(keyspace_id) from user_index where id = :id",
+ "query": "select func(keyspace_id) from user_index where id = :id",
+ "plan": "unsupported: expression on results of a vindex function"
+ },
+ {
+ "comment": "delete with multi-table targets",
+ "query": "delete music,user from music inner join user where music.id = user.id",
+ "plan": "unsupported: multi-shard or vindex write statement"
+ },
+ {
+ "comment": "select get_lock with non-dual table",
+ "query": "select get_lock('xyz', 10) from user",
+ "plan": "get_lock('xyz', 10) allowed only with dual"
+ },
+ {
+ "comment": "select is_free_lock with non-dual table",
+ "query": "select is_free_lock('xyz') from user",
+ "plan": "is_free_lock('xyz') allowed only with dual"
+ },
+ {
+ "comment": "union with SQL_CALC_FOUND_ROWS",
+ "query": "(select sql_calc_found_rows id from user where id = 1 limit 1) union select id from user where id = 1",
+ "plan": "SQL_CALC_FOUND_ROWS not supported with union"
+ },
+ {
+ "comment": "set with DEFAULT - vitess aware",
+ "query": "set workload = default",
+ "plan": "DEFAULT not supported for @@workload"
+ },
+ {
+ "comment": "set with DEFAULT - reserved connection",
+ "query": "set sql_mode = default",
+ "plan": "DEFAULT not supported for @@sql_mode"
+ },
+ {
+ "comment": "Multi shard query using into outfile s3",
+ "query": "select * from user into outfile s3 'out_file_name'",
+ "plan": "INTO is not supported on sharded keyspace"
+ },
+ {
+ "comment": "create view with Cannot auto-resolve for cross-shard joins",
+ "query": "create view user.view_a as select col from user join user_extra",
+ "v3-plan": "symbol col not found",
+ "gen4-plan": "Column 'col' in field list is ambiguous"
+ },
+ {
+ "comment": "create view with join that cannot be served in each shard separately",
+ "query": "create view user.view_a as select user_extra.id from user join user_extra",
+ "plan": "Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with sharded limit",
+ "query": "create view user.view_a as select id from user order by id limit 10",
+ "plan": "Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with top level subquery in select",
+ "query": "create view user.view_a as select a, (select col from user) from unsharded",
+ "plan": "Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with sql_calc_found_rows with limit",
+ "query": "create view user.view_a as select sql_calc_found_rows * from music limit 100",
+ "plan": "Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with sql_calc_found_rows with group by and having",
+ "query": "create view user.view_a as select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2",
+ "plan": "Complex select queries are not supported in create or alter view statements"
+ },
+ {
+ "comment": "create view with incompatible keyspaces",
+ "query": "create view main.view_a as select * from user.user_extra",
+ "plan": "Select query does not belong to the same keyspace as the view statement"
+ },
+ {
+ "comment": "avg function on scatter query",
+ "query": "select avg(id) from user",
+ "v3-plan": "unsupported: in scatter query: complex aggregate expression",
+ "gen4-plan": "unsupported: in scatter query: aggregation function 'avg'"
+ },
+ {
+ "comment": "scatter aggregate with ambiguous aliases",
+ "query": "select distinct a, b as a from user",
+ "plan": "generating order by clause: ambiguous symbol reference: a"
+ },
+ {
+ "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# This query will never work as the inner derived table is only selecting one of the column",
+ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select id from user_extra where user_id = 5) uu where uu.user_id = uu.id))",
+ "plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# changed to project all the columns from the derived tables.",
+ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select col, id, user_id from user_extra where user_id = 5) uu where uu.user_id = uu.id))",
+ "plan": "unsupported: cross-shard correlated subquery"
+ },
+ {
+ "comment": "Gen4 does a rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.",
+ "query": "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2",
+ "v3-plan": "can't do ORDER BY on top of UNION",
+ "gen4-plan": "Column 'id' in field list is ambiguous"
+ },
+ {
+ "comment": "unsupported with clause in delete statement",
+ "query": "with x as (select * from user) delete from x",
+ "plan": "unsupported: with expression in delete statement"
+ },
+ {
+ "comment": "unsupported with clause in update statement",
+ "query": "with x as (select * from user) update x set name = 'f'",
+ "plan": "unsupported: with expression in update statement"
+ },
+ {
+ "comment": "unsupported with clause in select statement",
+ "query": "with x as (select * from user) select * from x",
+ "plan": "unsupported: with expression in select statement"
+ },
+ {
+ "comment": "unsupported with clause in union statement",
+ "query": "with x as (select * from user) select * from x union select * from x",
+ "plan": "unsupported: with expression in union statement"
+ },
+ {
+ "comment": "scatter aggregate with complex select list (can't build order by)",
+ "query": "select distinct a+1 from user",
+ "v3-plan": "generating order by clause: cannot reference a complex expression",
+ "gen4-plan": "unsupported: in scatter query: complex order by expression: a + 1"
+ },
+ {
+ "comment": "aggregation on union",
+ "query": "select sum(col) from (select col from user union all select col from unsharded) t",
+ "v3-plan": "unsupported: cross-shard query with aggregates",
+ "gen4-plan": "using aggregation on top of a *planbuilder.concatenateGen4 plan is not yet supported"
+ },
+ {
+ "comment": "insert having subquery in row values",
+ "query": "insert into user(id, name) values ((select 1 from user where id = 1), 'A')",
+ "plan": "expr cannot be translated, not supported: (select 1 from `user` where id = 1)"
+ },
+ {
+ "comment": "lateral derived tables",
+ "query": "select * from user, lateral (select * from user_extra where user_id = user.id) t",
+ "plan": "unsupported: lateral derived tables"
+ },
+ {
+ "comment": "json_table expressions",
+ "query": "SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt",
+ "plan": "unsupported: json_table expressions"
+ },
+ {
+ "comment": "mix lock with other expr",
+ "query": "select get_lock('xyz', 10), 1 from dual",
+ "plan": "unsupported: lock function and other expression in same select query"
+ },
+ {
+ "comment": "systable union query in derived table with constraint on outside (star projection)",
+ "query": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'",
+ "v3-plan": "symbol constraint_name not found in table or subquery",
+ "gen4-plan": "unsupported: pushing projection 'constraint_name' on *sqlparser.Union"
+ },
+ {
+ "comment": "aggregation, where and derived tables - we can't push aggregations that might need a second layer of aggregation",
+ "query": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200",
+ "v3-plan": "unsupported: filtering on results of cross-shard subquery",
+ "gen4-plan": "expr cannot be translated, not supported: bazo between 100 and 200"
+ }
+]
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt
deleted file mode 100644
index 81627343248..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.txt
+++ /dev/null
@@ -1,458 +0,0 @@
-# union operations in subqueries (expressions)
-"select * from user where id in (select * from user union select * from user_extra)"
-"unsupported: '*' expression in cross-shard query"
-Gen4 plan same as above
-
-# TODO: Implement support for select with a target destination
-"select * from `user[-]`.user_metadata"
-"unsupported: SELECT with a target destination"
-Gen4 plan same as above
-
-# Unsupported INSERT statement with a target destination
-"insert into `user[-]`.user_metadata (a, b) values (1,2)"
-"unsupported: INSERT with a target destination"
-Gen4 plan same as above
-
-# Unsupported delete statement with a replica target destination
-"DELETE FROM `user[-]@replica`.user_metadata limit 1"
-"unsupported: delete statement with a replica target"
-Gen4 plan same as above
-
-# Unsupported update statement with a replica target destination
-"update `user[-]@replica`.user_metadata set id=2"
-"unsupported: update statement with a replica target"
-Gen4 plan same as above
-
-# scatter order by with * expression
-"select * from user order by id"
-"unsupported: in scatter query: order by must reference a column in the select list: id asc"
-Gen4 error: unsupported: '*' expression in cross-shard query
-
-# order by rand on a cross-shard subquery
-"select id from (select user.id, user.col from user join user_extra) as t order by rand()"
-"unsupported: memory sort: complex order by expression: rand()"
-Gen4 error: unsupported: in scatter query: complex order by expression: rand()
-
-# natural join
-"select * from user natural join user_extra"
-"unsupported: natural join"
-Gen4 plan same as above
-
-# join with USING construct
-"select * from user join user_extra using(id)"
-"unsupported: join with USING(column_list) clause for complex queries"
-Gen4 error: can't handle JOIN USING without authoritative tables
-
-# join with USING construct with 3 tables
-"select user.id from user join user_extra using(id) join music using(id2)"
-"unsupported: join with USING(column_list) clause for complex queries"
-Gen4 error: can't handle JOIN USING without authoritative tables
-
-# natural left join
-"select * from user natural left join user_extra"
-"unsupported: natural left join"
-Gen4 plan same as above
-
-# natural right join
-"select * from user natural right join user_extra"
-"unsupported: natural right join"
-Gen4 plan same as above
-
-# * expresson not allowed for cross-shard joins
-"select * from user join user_extra"
-"unsupported: '*' expression in cross-shard query"
-Gen4 plan same as above
-
-# Group by column number, used with non-aliased expression (duplicated code)
-"select * from user group by 1"
-"unsupported: '*' expression in cross-shard query"
-Gen4 error: cannot use column offsets in group statement when using `*`
-
-# complex group by expression
-"select a from user group by a+1"
-"unsupported: in scatter query: only simple references allowed"
-Gen4 error: unsupported: in scatter query: complex order by expression: a + 1
-
-# Complex aggregate expression on scatter
-"select 1+count(*) from user"
-"unsupported: in scatter query: complex aggregate expression"
-Gen4 plan same as above
-
-# Multi-value aggregates not supported
-"select count(a,b) from user"
-"unsupported: only one expression allowed inside aggregates: count(a, b)"
-Gen4 error: aggregate functions take a single argument 'count(a, b)'
-
-# scatter aggregate complex order by
-"select id from user group by id order by id+1"
-"unsupported: in scatter query: complex order by expression: id + 1"
-Gen4 plan same as above
-
-# Scatter order by is complex with aggregates in select
-"select col, count(*) from user group by col order by col+1"
-"unsupported: in scatter query: complex order by expression: col + 1"
-Gen4 plan same as above
-
-# Aggregate detection (group_concat)
-"select group_concat(user.a) from user join user_extra"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: unsupported: in scatter query: aggregation function 'group_concat'
-
-# subqueries not supported in group by
-"select id from user group by id, (select id from user_extra)"
-"unsupported: subqueries disallowed in GROUP or ORDER BY"
-Gen4 error: unsupported: subqueries disallowed in GROUP BY
-
-# Order by uses cross-shard expression
-"select id from user order by id+1"
-"unsupported: in scatter query: complex order by expression: id + 1"
-Gen4 plan same as above
-
-# Order by column number with collate
-"select user.col1 as a from user order by 1 collate utf8_general_ci"
-"unsupported: in scatter query: complex order by expression: 1 collate utf8_general_ci"
-Gen4 error: unsupported: in scatter query: complex order by expression: a collate utf8_general_ci
-
-# subqueries in delete
-"delete from user where col = (select id from unsharded)"
-"unsupported: subqueries in sharded DML"
-Gen4 plan same as above
-
-# sharded subqueries in unsharded delete
-"delete from unsharded where col = (select id from user)"
-"unsupported: sharded subqueries in DML"
-Gen4 plan same as above
-
-# sharded delete with limit clasue
-"delete from user_extra limit 10"
-"multi shard delete with limit is not supported"
-Gen4 plan same as above
-
-# sharded subquery in unsharded subquery in unsharded delete
-"delete from unsharded where col = (select id from unsharded where id = (select id from user))"
-"unsupported: sharded subqueries in DML"
-Gen4 plan same as above
-
-# sharded join unsharded subqueries in unsharded delete
-"delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)"
-"unsupported: sharded subqueries in DML"
-Gen4 plan same as above
-
-# scatter update with limit clause
-"update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1"
-"multi shard update with limit is not supported"
-Gen4 plan same as above
-
-# multi delete multi table
-"delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'"
-"unsupported: multi-shard or vindex write statement"
-Gen4 plan same as above
-
-# update changes primary vindex column
-"update user set id = 1 where id = 1"
-"unsupported: You can't update primary vindex columns. Invalid update on vindex: user_index"
-Gen4 plan same as above
-
-# update change in multicol vindex column
-"update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2"
-"unsupported: You can't update primary vindex columns. Invalid update on vindex: multicolIdx"
-Gen4 plan same as above
-
-# update changes non lookup vindex column
-"update user_metadata set md5 = 1 where user_id = 1"
-"unsupported: You can only update lookup vindexes. Invalid update on vindex: user_md5_index"
-Gen4 plan same as above
-
-# update with complex set clause
-"update music set id = id + 1 where id = 1"
-"unsupported: Only values are supported. Invalid update on column: `id` with expr: [id + 1]"
-Gen4 plan same as above
-
-# update by primary keyspace id, changing one vindex column, limit without order clause
-"update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10"
-"unsupported: Need to provide order by clause when using limit. Invalid update on vindex: email_user_map"
-Gen4 plan same as above
-
-# update with derived table
-"update (select id from user) as u set id = 4"
-"unsupported: subqueries in sharded DML"
-Gen4 error: The target table u of the UPDATE is not updatable
-
-# join in update tables
-"update user join user_extra on user.id = user_extra.id set user.name = 'foo'"
-"unsupported: multi-shard or vindex write statement"
-Gen4 error: unsupported: multiple tables in update
-
-# multiple tables in update
-"update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id"
-"unsupported: multi-shard or vindex write statement"
-Gen4 error: unsupported: multiple tables in update
-
-# unsharded insert, unqualified names and auto-inc combined
-"insert into unsharded_auto select col from unsharded"
-"unsupported: auto-inc and select in insert"
-Gen4 plan same as above
-
-# unsharded insert, no col list with auto-inc
-"insert into unsharded_auto values(1,1)"
-"column list required for tables with auto-inc columns"
-Gen4 plan same as above
-
-# unsharded insert, col list does not match values
-"insert into unsharded_auto(id, val) values(1)"
-"column list doesn't match values"
-Gen4 plan same as above
-
-# sharded upsert can't change vindex
-"insert into user(id) values(1) on duplicate key update id = 3"
-"unsupported: DML cannot change vindex column"
-Gen4 plan same as above
-
-# sharded upsert can't change vindex using values function
-"insert into music(user_id, id) values(1, 2) on duplicate key update user_id = values(id)"
-"unsupported: DML cannot change vindex column"
-Gen4 plan same as above
-
-# sharded replace no vindex
-"replace into user(val) values(1, 'foo')"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# sharded replace with vindex
-"replace into user(id, name) values(1, 'foo')"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace no column list
-"replace into user values(1, 2, 3)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with mimatched column list
-"replace into user(id) values (1, 2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with one vindex
-"replace into user(id) values (1)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with non vindex on vindex-enabled table
-"replace into user(nonid) values (2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with all vindexes supplied
-"replace into user(nonid, name, id) values (2, 'foo', 1)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace for non-vindex autoinc
-"replace into user_extra(nonid) values (2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-# replace with multiple rows
-"replace into user(id) values (1), (2)"
-"unsupported: REPLACE INTO with sharded schema"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where id = 1 and id = 2"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (multiple filters)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where func(id)"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (not a comparison)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where id > 1"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (not equality)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where 1 = id"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not a column)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where keyspace_id = 1"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (lhs is not id)"
-Gen4 plan same as above
-
-"select keyspace_id from user_index where id = id+1"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (rhs is not a value)"
-Gen4 plan same as above
-
-# vindex func without where condition
-"select keyspace_id from user_index"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (where clause missing)"
-Gen4 plan same as above
-
-# vindex func in subquery without where
-"select id from user where exists(select keyspace_id from user_index)"
-"unsupported: where clause for vindex function must be of the form id = or id in(,...) (where clause missing)"
-Gen4 plan same as above
-
-"select func(keyspace_id) from user_index where id = :id"
-"unsupported: expression on results of a vindex function"
-Gen4 plan same as above
-
-# delete with multi-table targets
-"delete music,user from music inner join user where music.id = user.id"
-"unsupported: multi-shard or vindex write statement"
-Gen4 plan same as above
-
-# select get_lock with non-dual table
-"select get_lock('xyz', 10) from user"
-"get_lock('xyz', 10) allowed only with dual"
-Gen4 plan same as above
-
-# select is_free_lock with non-dual table
-"select is_free_lock('xyz') from user"
-"is_free_lock('xyz') allowed only with dual"
-Gen4 plan same as above
-
-# union with SQL_CALC_FOUND_ROWS
-"(select sql_calc_found_rows id from user where id = 1 limit 1) union select id from user where id = 1"
-"SQL_CALC_FOUND_ROWS not supported with union"
-Gen4 plan same as above
-
-# set with DEFAULT - vitess aware
-"set workload = default"
-"DEFAULT not supported for @@workload"
-Gen4 plan same as above
-
-# set with DEFAULT - reserved connection
-"set sql_mode = default"
-"DEFAULT not supported for @@sql_mode"
-Gen4 plan same as above
-
-# Multi shard query using into outfile s3
-"select * from user into outfile s3 'out_file_name'"
-"INTO is not supported on sharded keyspace"
-Gen4 plan same as above
-
-# create view with Cannot auto-resolve for cross-shard joins
-"create view user.view_a as select col from user join user_extra"
-"symbol col not found"
-Gen4 error: Column 'col' in field list is ambiguous
-
-# create view with join that cannot be served in each shard separately
-"create view user.view_a as select user_extra.id from user join user_extra"
-"Complex select queries are not supported in create or alter view statements"
-Gen4 plan same as above
-
-# create view with sharded limit
-"create view user.view_a as select id from user order by id limit 10"
-"Complex select queries are not supported in create or alter view statements"
-Gen4 plan same as above
-
-# create view with top level subquery in select
-"create view user.view_a as select a, (select col from user) from unsharded"
-"Complex select queries are not supported in create or alter view statements"
-Gen4 plan same as above
-
-# create view with sql_calc_found_rows with limit
-"create view user.view_a as select sql_calc_found_rows * from music limit 100"
-"Complex select queries are not supported in create or alter view statements"
-Gen4 plan same as above
-
-# create view with sql_calc_found_rows with group by and having
-"create view user.view_a as select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2"
-"Complex select queries are not supported in create or alter view statements"
-Gen4 plan same as above
-
-# create view with incompatible keyspaces
-"create view main.view_a as select * from user.user_extra"
-"Select query does not belong to the same keyspace as the view statement"
-Gen4 plan same as above
-
-# avg function on scatter query
-"select avg(id) from user"
-"unsupported: in scatter query: complex aggregate expression"
-Gen4 error: unsupported: in scatter query: aggregation function 'avg'
-
-# scatter aggregate with ambiguous aliases
-"select distinct a, b as a from user"
-"generating order by clause: ambiguous symbol reference: a"
-Gen4 plan same as above
-
-# subquery of information_schema with itself and star expression in outer select
-"select a.*, u.id from information_schema.a a, user u where a.id in (select * from information_schema.b)"
-"unsupported: '*' expression in cross-shard query"
-Gen4 plan same as above
-
-# outer and inner subquery route reference the same "uu.id" name
-# but they refer to different things. The first reference is to the outermost query,
-# and the second reference is to the innermost 'from' subquery.
-# This query will never work as the inner derived table is only selecting one of the column
-"select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select id from user_extra where user_id = 5) uu where uu.user_id = uu.id))"
-"unsupported: cross-shard correlated subquery"
-Gen4 error: unsupported: unable to split predicates to derived table: uu.user_id = uu.id
-
-# outer and inner subquery route reference the same "uu.id" name
-# but they refer to different things. The first reference is to the outermost query,
-# and the second reference is to the innermost 'from' subquery.
-# changed to project all the columns from the derived tables.
-"select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select col, id, user_id from user_extra where user_id = 5) uu where uu.user_id = uu.id))"
-"unsupported: cross-shard correlated subquery"
-Gen4 plan same as above
-
-# Gen4 does a rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.
-"select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2"
-"can't do ORDER BY on top of UNION"
-Gen4 error: Column 'id' in field list is ambiguous
-
-# unsupported with clause in delete statement
-"with x as (select * from user) delete from x"
-"unsupported: with expression in delete statement"
-Gen4 plan same as above
-
-# unsupported with clause in update statement
-"with x as (select * from user) update x set name = 'f'"
-"unsupported: with expression in update statement"
-Gen4 plan same as above
-
-# unsupported with clause in select statement
-"with x as (select * from user) select * from x"
-"unsupported: with expression in select statement"
-Gen4 plan same as above
-
-# unsupported with clause in union statement
-"with x as (select * from user) select * from x union select * from x"
-"unsupported: with expression in union statement"
-Gen4 plan same as above
-
-# scatter aggregate with complex select list (can't build order by)
-"select distinct a+1 from user"
-"generating order by clause: cannot reference a complex expression"
-Gen4 error: unsupported: in scatter query: complex order by expression: a + 1
-
-# aggregation on union
-"select sum(col) from (select col from user union all select col from unsharded) t"
-"unsupported: cross-shard query with aggregates"
-Gen4 error: using aggregation on top of a *planbuilder.concatenateGen4 plan is not yet supported
-
-# systable union query in derived table with constraint on outside (without star projection)
-"select id from (select id from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select id from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `id` = 'primary'"
-"unsupported: filtering on results of cross-shard subquery"
-Gen4 error: can't push predicates on concatenate
-
-# insert having subquery in row values
-"insert into user(id, name) values ((select 1 from user where id = 1), 'A')"
-"expr cannot be translated, not supported: (select 1 from `user` where id = 1)"
-Gen4 plan same as above
-
-# lateral derived tables
-"select * from user, lateral (select * from user_extra where user_id = user.id) t"
-"unsupported: lateral derived tables"
-Gen4 plan same as above
-
-# json_table expressions
-"SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt"
-"unsupported: json_table expressions"
-Gen4 plan same as above
-
-# mix lock with other expr
-"select get_lock('xyz', 10), 1 from dual"
-"unsupported: lock function and other expression in same select query"
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/use_cases.json b/go/vt/vtgate/planbuilder/testdata/use_cases.json
new file mode 100644
index 00000000000..ed328fc30d3
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/use_cases.json
@@ -0,0 +1,61 @@
+[
+ {
+ "comment": "use db",
+ "query": "use ks",
+ "plan": {
+ "QueryType": "USE",
+ "Original": "use ks",
+ "Instructions": {
+ "OperatorType": "UpdateTarget",
+ "target": "ks"
+ }
+ }
+ },
+ {
+ "comment": "use db tablet",
+ "query": "use ks@replica",
+ "plan": {
+ "QueryType": "USE",
+ "Original": "use ks@replica",
+ "Instructions": {
+ "OperatorType": "UpdateTarget",
+ "target": "ks@replica"
+ }
+ }
+ },
+ {
+ "comment": "use db target with :",
+ "query": "use `ks:-80@replica`",
+ "plan": {
+ "QueryType": "USE",
+ "Original": "use `ks:-80@replica`",
+ "Instructions": {
+ "OperatorType": "UpdateTarget",
+ "target": "ks:-80@replica"
+ }
+ }
+ },
+ {
+ "comment": "use db target with /",
+ "query": "use `ks/80-@replica`",
+ "plan": {
+ "QueryType": "USE",
+ "Original": "use `ks/80-@replica`",
+ "Instructions": {
+ "OperatorType": "UpdateTarget",
+ "target": "ks/80-@replica"
+ }
+ }
+ },
+ {
+ "comment": "reset db",
+ "query": "use",
+ "plan": {
+ "QueryType": "USE",
+ "Original": "use",
+ "Instructions": {
+ "OperatorType": "UpdateTarget"
+ }
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/use_cases.txt b/go/vt/vtgate/planbuilder/testdata/use_cases.txt
deleted file mode 100644
index bc7ce1e9687..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/use_cases.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-# use db
-"use ks"
-{
- "QueryType": "USE",
- "Original": "use ks",
- "Instructions": {
- "OperatorType": "UpdateTarget",
- "target": "ks"
- }
-}
-Gen4 plan same as above
-
-# use db tablet
-"use ks@replica"
-{
- "QueryType": "USE",
- "Original": "use ks@replica",
- "Instructions": {
- "OperatorType": "UpdateTarget",
- "target": "ks@replica"
- }
-}
-Gen4 plan same as above
-
-# use db target with :
-"use `ks:-80@replica`"
-{
- "QueryType": "USE",
- "Original": "use `ks:-80@replica`",
- "Instructions": {
- "OperatorType": "UpdateTarget",
- "target": "ks:-80@replica"
- }
-}
-Gen4 plan same as above
-
-# use db target with /
-"use `ks/80-@replica`"
-{
- "QueryType": "USE",
- "Original": "use `ks/80-@replica`",
- "Instructions": {
- "OperatorType": "UpdateTarget",
- "target": "ks/80-@replica"
- }
-}
-Gen4 plan same as above
-
-# reset db
-"use"
-{
- "QueryType": "USE",
- "Original": "use",
- "Instructions": {
- "OperatorType": "UpdateTarget"
- }
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json
new file mode 100644
index 00000000000..873536a9a79
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json
@@ -0,0 +1,728 @@
+[
+ {
+ "comment": "vindex func read all cols",
+ "query": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5
+ ],
+ "Fields": {
+ "hex_keyspace_id": "VARBINARY",
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY",
+ "shard": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5
+ ],
+ "Fields": {
+ "hex_keyspace_id": "VARBINARY",
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY",
+ "shard": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "vindex func select *",
+ "query": "select * from user_index where id = :id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user_index where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5
+ ],
+ "Fields": {
+ "hex_keyspace_id": "VARBINARY",
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY",
+ "shard": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select * from user_index where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5
+ ],
+ "Fields": {
+ "hex_keyspace_id": "VARBINARY",
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY",
+ "range_end": "VARBINARY",
+ "range_start": "VARBINARY",
+ "shard": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "vindex func read with id repeated",
+ "query": "select id, keyspace_id, id from user_index where id = :id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, id from user_index where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, id from user_index where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "ambiguous vindex reference",
+ "query": "select id, keyspace_id, id from hash_dup where id = :id",
+ "plan": "ambiguous vindex reference: hash_dup"
+ },
+ {
+ "comment": "disambiguated vindex reference",
+ "query": "select id, keyspace_id, id from second_user.hash_dup where id = :id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "hash_dup"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id",
+ "Instructions": {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "hash_dup"
+ },
+ "TablesUsed": [
+ "hash_dup"
+ ]
+ }
+ },
+ {
+ "comment": "You can even join with a vindexFunc primitive",
+ "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1
+ ],
+ "Fields": {
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1
+ ],
+ "Fields": {
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "Join vindexFunc on RHS",
+ "query": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:0",
+ "TableName": "unsharded_",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1
+ ],
+ "Fields": {
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:0",
+ "TableName": "unsharded_",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1
+ ],
+ "Fields": {
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "Join with vindexFunc on a column of it, already present in select list",
+ "query": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_index_id": 0
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_index_id": 0
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "Join with vindexFunc on a column of it, already present at the end of the select list",
+ "query": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1,R:0",
+ "JoinVars": {
+ "user_index_id": 1
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,L:0,R:0",
+ "JoinVars": {
+ "user_index_id": 0
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "Join with vindexFunc on a column of it, not present in select list",
+ "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "user_index_id": 1
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "user_index_id": 0
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user_index"
+ ]
+ }
+ },
+ {
+ "comment": "Join with aliased table name",
+ "query": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "ui_id": 1
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 1,
+ 0
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :ui_id",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "ui_id": 0
+ },
+ "TableName": "_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "VindexFunc",
+ "Variant": "VindexMap",
+ "Columns": [
+ 0,
+ 1
+ ],
+ "Fields": {
+ "id": "VARBINARY",
+ "keyspace_id": "VARBINARY"
+ },
+ "Value": ":id",
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
+ "Query": "select unsharded.id from unsharded where unsharded.id = :ui_id",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user_index"
+ ]
+ }
+ },
+ {
+ "query": "select none from user_index where id = :id",
+ "v3-plan": "symbol `none` not found in table or subquery",
+ "gen4-plan": "symbol `none` not found"
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt
deleted file mode 100644
index 6a47ec47e0f..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.txt
+++ /dev/null
@@ -1,714 +0,0 @@
-# vindex func read all cols
-"select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id"
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3,
- 4,
- 5
- ],
- "Fields": {
- "hex_keyspace_id": "VARBINARY",
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY",
- "shard": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3,
- 4,
- 5
- ],
- "Fields": {
- "hex_keyspace_id": "VARBINARY",
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY",
- "shard": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user_index"
- ]
-}
-
-# vindex func select *
-"select * from user_index where id = :id"
-{
- "QueryType": "SELECT",
- "Original": "select * from user_index where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3,
- 4,
- 5
- ],
- "Fields": {
- "hex_keyspace_id": "VARBINARY",
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY",
- "shard": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select * from user_index where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 2,
- 3,
- 4,
- 5
- ],
- "Fields": {
- "hex_keyspace_id": "VARBINARY",
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY",
- "range_end": "VARBINARY",
- "range_start": "VARBINARY",
- "shard": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user_index"
- ]
-}
-
-# vindex func read with id repeated
-"select id, keyspace_id, id from user_index where id = :id"
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, id from user_index where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, id from user_index where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user_index"
- ]
-}
-
-# ambiguous vindex reference
-"select id, keyspace_id, id from hash_dup where id = :id"
-"ambiguous vindex reference: hash_dup"
-Gen4 plan same as above
-
-# disambiguated vindex reference
-"select id, keyspace_id, id from second_user.hash_dup where id = :id"
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "hash_dup"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id",
- "Instructions": {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "hash_dup"
- },
- "TablesUsed": [
- "hash_dup"
- ]
-}
-
-# You can even join with a vindexFunc primitive
-"select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id"
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1
- ],
- "Fields": {
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1
- ],
- "Fields": {
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user_index"
- ]
-}
-
-# Join vindexFunc on RHS
-"select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id"
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:0",
- "TableName": "unsharded_",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1
- ],
- "Fields": {
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:0",
- "TableName": "unsharded_",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1
- ],
- "Fields": {
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user_index"
- ]
-}
-
-# Join with vindexFunc on a column of it, already present in select list
-"select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id"
-{
- "QueryType": "SELECT",
- "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_index_id": 0
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_index_id": 0
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user_index"
- ]
-}
-
-# Join with vindexFunc on a column of it, already present at the end of the select list
-"select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id"
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1,R:0",
- "JoinVars": {
- "user_index_id": 1
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,L:0,R:0",
- "JoinVars": {
- "user_index_id": 0
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user_index"
- ]
-}
-
-# Join with vindexFunc on a column of it, not present in select list
-"select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id"
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "user_index_id": 1
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "user_index_id": 0
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user_index"
- ]
-}
-
-# Join with aliased table name
-"select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id"
-{
- "QueryType": "SELECT",
- "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "ui_id": 1
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 1,
- 0
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :ui_id",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "ui_id": 0
- },
- "TableName": "_unsharded",
- "Inputs": [
- {
- "OperatorType": "VindexFunc",
- "Variant": "VindexMap",
- "Columns": [
- 0,
- 1
- ],
- "Fields": {
- "id": "VARBINARY",
- "keyspace_id": "VARBINARY"
- },
- "Value": ":id",
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id from unsharded where 1 != 1",
- "Query": "select unsharded.id from unsharded where unsharded.id = :ui_id",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user_index"
- ]
-}
-
-"select none from user_index where id = :id"
-"symbol `none` not found in table or subquery"
-Gen4 error: symbol `none` not found
diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_schema_test.json b/go/vt/vtgate/planbuilder/testdata/vschemas/oltp_schema.json
similarity index 93%
rename from go/vt/vtgate/planbuilder/testdata/oltp_schema_test.json
rename to go/vt/vtgate/planbuilder/testdata/vschemas/oltp_schema.json
index d002a61e81a..1ed06779ba7 100644
--- a/go/vt/vtgate/planbuilder/testdata/oltp_schema_test.json
+++ b/go/vt/vtgate/planbuilder/testdata/vschemas/oltp_schema.json
@@ -8,7 +8,7 @@
}
},
"tables": {
- "sbtest1": {
+ "sbtest1": {
"column_vindexes": [
{
"column": "id",
@@ -26,7 +26,7 @@
}
]
},
- "sbtest2": {
+ "sbtest2": {
"column_vindexes": [
{
"column": "id",
@@ -44,7 +44,7 @@
}
]
},
- "sbtest3": {
+ "sbtest3": {
"column_vindexes": [
{
"column": "id",
@@ -62,7 +62,7 @@
}
]
},
- "sbtest4": {
+ "sbtest4": {
"column_vindexes": [
{
"column": "id",
@@ -80,7 +80,7 @@
}
]
},
- "sbtest5": {
+ "sbtest5": {
"column_vindexes": [
{
"column": "id",
@@ -98,7 +98,7 @@
}
]
},
- "sbtest6": {
+ "sbtest6": {
"column_vindexes": [
{
"column": "id",
@@ -116,7 +116,7 @@
}
]
},
- "sbtest7": {
+ "sbtest7": {
"column_vindexes": [
{
"column": "id",
@@ -134,7 +134,7 @@
}
]
},
- "sbtest8": {
+ "sbtest8": {
"column_vindexes": [
{
"column": "id",
@@ -152,7 +152,7 @@
}
]
},
- "sbtest9": {
+ "sbtest9": {
"column_vindexes": [
{
"column": "id",
@@ -170,7 +170,7 @@
}
]
},
- "sbtest10": {
+ "sbtest10": {
"column_vindexes": [
{
"column": "id",
@@ -188,7 +188,7 @@
}
]
},
- "sbtest11": {
+ "sbtest11": {
"column_vindexes": [
{
"column": "id",
@@ -206,7 +206,7 @@
}
]
},
- "sbtest12": {
+ "sbtest12": {
"column_vindexes": [
{
"column": "id",
@@ -224,7 +224,7 @@
}
]
},
- "sbtest13": {
+ "sbtest13": {
"column_vindexes": [
{
"column": "id",
@@ -242,7 +242,7 @@
}
]
},
- "sbtest14": {
+ "sbtest14": {
"column_vindexes": [
{
"column": "id",
@@ -260,7 +260,7 @@
}
]
},
- "sbtest15": {
+ "sbtest15": {
"column_vindexes": [
{
"column": "id",
@@ -278,7 +278,7 @@
}
]
},
- "sbtest16": {
+ "sbtest16": {
"column_vindexes": [
{
"column": "id",
@@ -296,7 +296,7 @@
}
]
},
- "sbtest17": {
+ "sbtest17": {
"column_vindexes": [
{
"column": "id",
@@ -314,7 +314,7 @@
}
]
},
- "sbtest18": {
+ "sbtest18": {
"column_vindexes": [
{
"column": "id",
@@ -332,7 +332,7 @@
}
]
},
- "sbtest19": {
+ "sbtest19": {
"column_vindexes": [
{
"column": "id",
@@ -350,7 +350,7 @@
}
]
},
- "sbtest20": {
+ "sbtest20": {
"column_vindexes": [
{
"column": "id",
@@ -368,7 +368,7 @@
}
]
},
- "sbtest21": {
+ "sbtest21": {
"column_vindexes": [
{
"column": "id",
@@ -386,7 +386,7 @@
}
]
},
- "sbtest22": {
+ "sbtest22": {
"column_vindexes": [
{
"column": "id",
@@ -404,7 +404,7 @@
}
]
},
- "sbtest23": {
+ "sbtest23": {
"column_vindexes": [
{
"column": "id",
@@ -422,7 +422,7 @@
}
]
},
- "sbtest24": {
+ "sbtest24": {
"column_vindexes": [
{
"column": "id",
@@ -440,7 +440,7 @@
}
]
},
- "sbtest25": {
+ "sbtest25": {
"column_vindexes": [
{
"column": "id",
@@ -458,7 +458,7 @@
}
]
},
- "sbtest26": {
+ "sbtest26": {
"column_vindexes": [
{
"column": "id",
@@ -476,7 +476,7 @@
}
]
},
- "sbtest27": {
+ "sbtest27": {
"column_vindexes": [
{
"column": "id",
@@ -494,7 +494,7 @@
}
]
},
- "sbtest28": {
+ "sbtest28": {
"column_vindexes": [
{
"column": "id",
@@ -512,7 +512,7 @@
}
]
},
- "sbtest29": {
+ "sbtest29": {
"column_vindexes": [
{
"column": "id",
@@ -530,7 +530,7 @@
}
]
},
- "sbtest30": {
+ "sbtest30": {
"column_vindexes": [
{
"column": "id",
@@ -548,7 +548,7 @@
}
]
},
- "sbtest31": {
+ "sbtest31": {
"column_vindexes": [
{
"column": "id",
@@ -566,7 +566,7 @@
}
]
},
- "sbtest32": {
+ "sbtest32": {
"column_vindexes": [
{
"column": "id",
@@ -584,7 +584,7 @@
}
]
},
- "sbtest33": {
+ "sbtest33": {
"column_vindexes": [
{
"column": "id",
@@ -602,7 +602,7 @@
}
]
},
- "sbtest34": {
+ "sbtest34": {
"column_vindexes": [
{
"column": "id",
@@ -620,7 +620,7 @@
}
]
},
- "sbtest35": {
+ "sbtest35": {
"column_vindexes": [
{
"column": "id",
@@ -638,7 +638,7 @@
}
]
},
- "sbtest36": {
+ "sbtest36": {
"column_vindexes": [
{
"column": "id",
@@ -656,7 +656,7 @@
}
]
},
- "sbtest37": {
+ "sbtest37": {
"column_vindexes": [
{
"column": "id",
@@ -674,7 +674,7 @@
}
]
},
- "sbtest38": {
+ "sbtest38": {
"column_vindexes": [
{
"column": "id",
@@ -692,7 +692,7 @@
}
]
},
- "sbtest39": {
+ "sbtest39": {
"column_vindexes": [
{
"column": "id",
@@ -710,7 +710,7 @@
}
]
},
- "sbtest40": {
+ "sbtest40": {
"column_vindexes": [
{
"column": "id",
@@ -728,7 +728,7 @@
}
]
},
- "sbtest41": {
+ "sbtest41": {
"column_vindexes": [
{
"column": "id",
@@ -746,7 +746,7 @@
}
]
},
- "sbtest42": {
+ "sbtest42": {
"column_vindexes": [
{
"column": "id",
@@ -764,7 +764,7 @@
}
]
},
- "sbtest43": {
+ "sbtest43": {
"column_vindexes": [
{
"column": "id",
@@ -782,7 +782,7 @@
}
]
},
- "sbtest44": {
+ "sbtest44": {
"column_vindexes": [
{
"column": "id",
@@ -800,7 +800,7 @@
}
]
},
- "sbtest45": {
+ "sbtest45": {
"column_vindexes": [
{
"column": "id",
@@ -818,7 +818,7 @@
}
]
},
- "sbtest46": {
+ "sbtest46": {
"column_vindexes": [
{
"column": "id",
@@ -836,7 +836,7 @@
}
]
},
- "sbtest47": {
+ "sbtest47": {
"column_vindexes": [
{
"column": "id",
@@ -854,7 +854,7 @@
}
]
},
- "sbtest48": {
+ "sbtest48": {
"column_vindexes": [
{
"column": "id",
@@ -872,7 +872,7 @@
}
]
},
- "sbtest49": {
+ "sbtest49": {
"column_vindexes": [
{
"column": "id",
@@ -890,7 +890,7 @@
}
]
},
- "sbtest50": {
+ "sbtest50": {
"column_vindexes": [
{
"column": "id",
@@ -908,7 +908,7 @@
}
]
},
- "sbtest51": {
+ "sbtest51": {
"column_vindexes": [
{
"column": "id",
@@ -926,7 +926,7 @@
}
]
},
- "sbtest52": {
+ "sbtest52": {
"column_vindexes": [
{
"column": "id",
@@ -944,7 +944,7 @@
}
]
},
- "sbtest53": {
+ "sbtest53": {
"column_vindexes": [
{
"column": "id",
@@ -962,7 +962,7 @@
}
]
},
- "sbtest54": {
+ "sbtest54": {
"column_vindexes": [
{
"column": "id",
@@ -980,7 +980,7 @@
}
]
},
- "sbtest55": {
+ "sbtest55": {
"column_vindexes": [
{
"column": "id",
@@ -998,7 +998,7 @@
}
]
},
- "sbtest56": {
+ "sbtest56": {
"column_vindexes": [
{
"column": "id",
@@ -1016,7 +1016,7 @@
}
]
},
- "sbtest57": {
+ "sbtest57": {
"column_vindexes": [
{
"column": "id",
@@ -1034,7 +1034,7 @@
}
]
},
- "sbtest58": {
+ "sbtest58": {
"column_vindexes": [
{
"column": "id",
@@ -1052,7 +1052,7 @@
}
]
},
- "sbtest59": {
+ "sbtest59": {
"column_vindexes": [
{
"column": "id",
@@ -1070,7 +1070,7 @@
}
]
},
- "sbtest60": {
+ "sbtest60": {
"column_vindexes": [
{
"column": "id",
@@ -1088,7 +1088,7 @@
}
]
},
- "sbtest61": {
+ "sbtest61": {
"column_vindexes": [
{
"column": "id",
@@ -1106,7 +1106,7 @@
}
]
},
- "sbtest62": {
+ "sbtest62": {
"column_vindexes": [
{
"column": "id",
@@ -1124,7 +1124,7 @@
}
]
},
- "sbtest63": {
+ "sbtest63": {
"column_vindexes": [
{
"column": "id",
@@ -1142,7 +1142,7 @@
}
]
},
- "sbtest64": {
+ "sbtest64": {
"column_vindexes": [
{
"column": "id",
@@ -1160,7 +1160,7 @@
}
]
},
- "sbtest65": {
+ "sbtest65": {
"column_vindexes": [
{
"column": "id",
@@ -1178,7 +1178,7 @@
}
]
},
- "sbtest66": {
+ "sbtest66": {
"column_vindexes": [
{
"column": "id",
@@ -1196,7 +1196,7 @@
}
]
},
- "sbtest67": {
+ "sbtest67": {
"column_vindexes": [
{
"column": "id",
@@ -1214,7 +1214,7 @@
}
]
},
- "sbtest68": {
+ "sbtest68": {
"column_vindexes": [
{
"column": "id",
@@ -1232,7 +1232,7 @@
}
]
},
- "sbtest69": {
+ "sbtest69": {
"column_vindexes": [
{
"column": "id",
@@ -1250,7 +1250,7 @@
}
]
},
- "sbtest70": {
+ "sbtest70": {
"column_vindexes": [
{
"column": "id",
@@ -1268,7 +1268,7 @@
}
]
},
- "sbtest71": {
+ "sbtest71": {
"column_vindexes": [
{
"column": "id",
@@ -1286,7 +1286,7 @@
}
]
},
- "sbtest72": {
+ "sbtest72": {
"column_vindexes": [
{
"column": "id",
@@ -1304,7 +1304,7 @@
}
]
},
- "sbtest73": {
+ "sbtest73": {
"column_vindexes": [
{
"column": "id",
@@ -1322,7 +1322,7 @@
}
]
},
- "sbtest74": {
+ "sbtest74": {
"column_vindexes": [
{
"column": "id",
@@ -1340,7 +1340,7 @@
}
]
},
- "sbtest75": {
+ "sbtest75": {
"column_vindexes": [
{
"column": "id",
@@ -1358,7 +1358,7 @@
}
]
},
- "sbtest76": {
+ "sbtest76": {
"column_vindexes": [
{
"column": "id",
@@ -1376,7 +1376,7 @@
}
]
},
- "sbtest77": {
+ "sbtest77": {
"column_vindexes": [
{
"column": "id",
@@ -1394,7 +1394,7 @@
}
]
},
- "sbtest78": {
+ "sbtest78": {
"column_vindexes": [
{
"column": "id",
@@ -1412,7 +1412,7 @@
}
]
},
- "sbtest79": {
+ "sbtest79": {
"column_vindexes": [
{
"column": "id",
@@ -1430,7 +1430,7 @@
}
]
},
- "sbtest80": {
+ "sbtest80": {
"column_vindexes": [
{
"column": "id",
@@ -1448,7 +1448,7 @@
}
]
},
- "sbtest81": {
+ "sbtest81": {
"column_vindexes": [
{
"column": "id",
@@ -1466,7 +1466,7 @@
}
]
},
- "sbtest82": {
+ "sbtest82": {
"column_vindexes": [
{
"column": "id",
@@ -1484,7 +1484,7 @@
}
]
},
- "sbtest83": {
+ "sbtest83": {
"column_vindexes": [
{
"column": "id",
@@ -1502,7 +1502,7 @@
}
]
},
- "sbtest84": {
+ "sbtest84": {
"column_vindexes": [
{
"column": "id",
@@ -1520,7 +1520,7 @@
}
]
},
- "sbtest85": {
+ "sbtest85": {
"column_vindexes": [
{
"column": "id",
@@ -1538,7 +1538,7 @@
}
]
},
- "sbtest86": {
+ "sbtest86": {
"column_vindexes": [
{
"column": "id",
@@ -1556,7 +1556,7 @@
}
]
},
- "sbtest87": {
+ "sbtest87": {
"column_vindexes": [
{
"column": "id",
@@ -1574,7 +1574,7 @@
}
]
},
- "sbtest88": {
+ "sbtest88": {
"column_vindexes": [
{
"column": "id",
@@ -1592,7 +1592,7 @@
}
]
},
- "sbtest89": {
+ "sbtest89": {
"column_vindexes": [
{
"column": "id",
@@ -1610,7 +1610,7 @@
}
]
},
- "sbtest90": {
+ "sbtest90": {
"column_vindexes": [
{
"column": "id",
@@ -1628,7 +1628,7 @@
}
]
},
- "sbtest91": {
+ "sbtest91": {
"column_vindexes": [
{
"column": "id",
@@ -1646,7 +1646,7 @@
}
]
},
- "sbtest92": {
+ "sbtest92": {
"column_vindexes": [
{
"column": "id",
@@ -1664,7 +1664,7 @@
}
]
},
- "sbtest93": {
+ "sbtest93": {
"column_vindexes": [
{
"column": "id",
@@ -1682,7 +1682,7 @@
}
]
},
- "sbtest94": {
+ "sbtest94": {
"column_vindexes": [
{
"column": "id",
@@ -1700,7 +1700,7 @@
}
]
},
- "sbtest95": {
+ "sbtest95": {
"column_vindexes": [
{
"column": "id",
@@ -1718,7 +1718,7 @@
}
]
},
- "sbtest96": {
+ "sbtest96": {
"column_vindexes": [
{
"column": "id",
@@ -1736,7 +1736,7 @@
}
]
},
- "sbtest97": {
+ "sbtest97": {
"column_vindexes": [
{
"column": "id",
@@ -1754,7 +1754,7 @@
}
]
},
- "sbtest98": {
+ "sbtest98": {
"column_vindexes": [
{
"column": "id",
@@ -1772,7 +1772,7 @@
}
]
},
- "sbtest99": {
+ "sbtest99": {
"column_vindexes": [
{
"column": "id",
@@ -1790,7 +1790,7 @@
}
]
},
- "sbtest100": {
+ "sbtest100": {
"column_vindexes": [
{
"column": "id",
@@ -1808,7 +1808,7 @@
}
]
},
- "sbtest101": {
+ "sbtest101": {
"column_vindexes": [
{
"column": "id",
@@ -1826,7 +1826,7 @@
}
]
},
- "sbtest102": {
+ "sbtest102": {
"column_vindexes": [
{
"column": "id",
@@ -1844,7 +1844,7 @@
}
]
},
- "sbtest103": {
+ "sbtest103": {
"column_vindexes": [
{
"column": "id",
@@ -1862,7 +1862,7 @@
}
]
},
- "sbtest104": {
+ "sbtest104": {
"column_vindexes": [
{
"column": "id",
@@ -1880,7 +1880,7 @@
}
]
},
- "sbtest105": {
+ "sbtest105": {
"column_vindexes": [
{
"column": "id",
@@ -1898,7 +1898,7 @@
}
]
},
- "sbtest106": {
+ "sbtest106": {
"column_vindexes": [
{
"column": "id",
@@ -1916,7 +1916,7 @@
}
]
},
- "sbtest107": {
+ "sbtest107": {
"column_vindexes": [
{
"column": "id",
@@ -1934,7 +1934,7 @@
}
]
},
- "sbtest108": {
+ "sbtest108": {
"column_vindexes": [
{
"column": "id",
@@ -1952,7 +1952,7 @@
}
]
},
- "sbtest109": {
+ "sbtest109": {
"column_vindexes": [
{
"column": "id",
@@ -1970,7 +1970,7 @@
}
]
},
- "sbtest110": {
+ "sbtest110": {
"column_vindexes": [
{
"column": "id",
@@ -1988,7 +1988,7 @@
}
]
},
- "sbtest111": {
+ "sbtest111": {
"column_vindexes": [
{
"column": "id",
@@ -2006,7 +2006,7 @@
}
]
},
- "sbtest112": {
+ "sbtest112": {
"column_vindexes": [
{
"column": "id",
@@ -2024,7 +2024,7 @@
}
]
},
- "sbtest113": {
+ "sbtest113": {
"column_vindexes": [
{
"column": "id",
@@ -2042,7 +2042,7 @@
}
]
},
- "sbtest114": {
+ "sbtest114": {
"column_vindexes": [
{
"column": "id",
@@ -2060,7 +2060,7 @@
}
]
},
- "sbtest115": {
+ "sbtest115": {
"column_vindexes": [
{
"column": "id",
@@ -2078,7 +2078,7 @@
}
]
},
- "sbtest116": {
+ "sbtest116": {
"column_vindexes": [
{
"column": "id",
@@ -2096,7 +2096,7 @@
}
]
},
- "sbtest117": {
+ "sbtest117": {
"column_vindexes": [
{
"column": "id",
@@ -2114,7 +2114,7 @@
}
]
},
- "sbtest118": {
+ "sbtest118": {
"column_vindexes": [
{
"column": "id",
@@ -2132,7 +2132,7 @@
}
]
},
- "sbtest119": {
+ "sbtest119": {
"column_vindexes": [
{
"column": "id",
@@ -2150,7 +2150,7 @@
}
]
},
- "sbtest120": {
+ "sbtest120": {
"column_vindexes": [
{
"column": "id",
@@ -2168,7 +2168,7 @@
}
]
},
- "sbtest121": {
+ "sbtest121": {
"column_vindexes": [
{
"column": "id",
@@ -2186,7 +2186,7 @@
}
]
},
- "sbtest122": {
+ "sbtest122": {
"column_vindexes": [
{
"column": "id",
@@ -2204,7 +2204,7 @@
}
]
},
- "sbtest123": {
+ "sbtest123": {
"column_vindexes": [
{
"column": "id",
@@ -2222,7 +2222,7 @@
}
]
},
- "sbtest124": {
+ "sbtest124": {
"column_vindexes": [
{
"column": "id",
@@ -2240,7 +2240,7 @@
}
]
},
- "sbtest125": {
+ "sbtest125": {
"column_vindexes": [
{
"column": "id",
@@ -2258,7 +2258,7 @@
}
]
},
- "sbtest126": {
+ "sbtest126": {
"column_vindexes": [
{
"column": "id",
@@ -2276,7 +2276,7 @@
}
]
},
- "sbtest127": {
+ "sbtest127": {
"column_vindexes": [
{
"column": "id",
@@ -2294,7 +2294,7 @@
}
]
},
- "sbtest128": {
+ "sbtest128": {
"column_vindexes": [
{
"column": "id",
@@ -2312,7 +2312,7 @@
}
]
},
- "sbtest129": {
+ "sbtest129": {
"column_vindexes": [
{
"column": "id",
@@ -2330,7 +2330,7 @@
}
]
},
- "sbtest130": {
+ "sbtest130": {
"column_vindexes": [
{
"column": "id",
@@ -2348,7 +2348,7 @@
}
]
},
- "sbtest131": {
+ "sbtest131": {
"column_vindexes": [
{
"column": "id",
@@ -2366,7 +2366,7 @@
}
]
},
- "sbtest132": {
+ "sbtest132": {
"column_vindexes": [
{
"column": "id",
@@ -2384,7 +2384,7 @@
}
]
},
- "sbtest133": {
+ "sbtest133": {
"column_vindexes": [
{
"column": "id",
@@ -2402,7 +2402,7 @@
}
]
},
- "sbtest134": {
+ "sbtest134": {
"column_vindexes": [
{
"column": "id",
@@ -2420,7 +2420,7 @@
}
]
},
- "sbtest135": {
+ "sbtest135": {
"column_vindexes": [
{
"column": "id",
@@ -2438,7 +2438,7 @@
}
]
},
- "sbtest136": {
+ "sbtest136": {
"column_vindexes": [
{
"column": "id",
@@ -2456,7 +2456,7 @@
}
]
},
- "sbtest137": {
+ "sbtest137": {
"column_vindexes": [
{
"column": "id",
@@ -2474,7 +2474,7 @@
}
]
},
- "sbtest138": {
+ "sbtest138": {
"column_vindexes": [
{
"column": "id",
@@ -2492,7 +2492,7 @@
}
]
},
- "sbtest139": {
+ "sbtest139": {
"column_vindexes": [
{
"column": "id",
@@ -2510,7 +2510,7 @@
}
]
},
- "sbtest140": {
+ "sbtest140": {
"column_vindexes": [
{
"column": "id",
@@ -2528,7 +2528,7 @@
}
]
},
- "sbtest141": {
+ "sbtest141": {
"column_vindexes": [
{
"column": "id",
@@ -2546,7 +2546,7 @@
}
]
},
- "sbtest142": {
+ "sbtest142": {
"column_vindexes": [
{
"column": "id",
@@ -2564,7 +2564,7 @@
}
]
},
- "sbtest143": {
+ "sbtest143": {
"column_vindexes": [
{
"column": "id",
@@ -2582,7 +2582,7 @@
}
]
},
- "sbtest144": {
+ "sbtest144": {
"column_vindexes": [
{
"column": "id",
@@ -2600,7 +2600,7 @@
}
]
},
- "sbtest145": {
+ "sbtest145": {
"column_vindexes": [
{
"column": "id",
@@ -2618,7 +2618,7 @@
}
]
},
- "sbtest146": {
+ "sbtest146": {
"column_vindexes": [
{
"column": "id",
@@ -2636,7 +2636,7 @@
}
]
},
- "sbtest147": {
+ "sbtest147": {
"column_vindexes": [
{
"column": "id",
@@ -2654,7 +2654,7 @@
}
]
},
- "sbtest148": {
+ "sbtest148": {
"column_vindexes": [
{
"column": "id",
@@ -2672,7 +2672,7 @@
}
]
},
- "sbtest149": {
+ "sbtest149": {
"column_vindexes": [
{
"column": "id",
@@ -2690,7 +2690,7 @@
}
]
},
- "sbtest150": {
+ "sbtest150": {
"column_vindexes": [
{
"column": "id",
@@ -2708,7 +2708,7 @@
}
]
},
- "sbtest151": {
+ "sbtest151": {
"column_vindexes": [
{
"column": "id",
@@ -2726,7 +2726,7 @@
}
]
},
- "sbtest152": {
+ "sbtest152": {
"column_vindexes": [
{
"column": "id",
@@ -2744,7 +2744,7 @@
}
]
},
- "sbtest153": {
+ "sbtest153": {
"column_vindexes": [
{
"column": "id",
@@ -2762,7 +2762,7 @@
}
]
},
- "sbtest154": {
+ "sbtest154": {
"column_vindexes": [
{
"column": "id",
@@ -2780,7 +2780,7 @@
}
]
},
- "sbtest155": {
+ "sbtest155": {
"column_vindexes": [
{
"column": "id",
@@ -2798,7 +2798,7 @@
}
]
},
- "sbtest156": {
+ "sbtest156": {
"column_vindexes": [
{
"column": "id",
@@ -2816,7 +2816,7 @@
}
]
},
- "sbtest157": {
+ "sbtest157": {
"column_vindexes": [
{
"column": "id",
@@ -2834,7 +2834,7 @@
}
]
},
- "sbtest158": {
+ "sbtest158": {
"column_vindexes": [
{
"column": "id",
@@ -2852,7 +2852,7 @@
}
]
},
- "sbtest159": {
+ "sbtest159": {
"column_vindexes": [
{
"column": "id",
@@ -2870,7 +2870,7 @@
}
]
},
- "sbtest160": {
+ "sbtest160": {
"column_vindexes": [
{
"column": "id",
@@ -2888,7 +2888,7 @@
}
]
},
- "sbtest161": {
+ "sbtest161": {
"column_vindexes": [
{
"column": "id",
@@ -2906,7 +2906,7 @@
}
]
},
- "sbtest162": {
+ "sbtest162": {
"column_vindexes": [
{
"column": "id",
@@ -2924,7 +2924,7 @@
}
]
},
- "sbtest163": {
+ "sbtest163": {
"column_vindexes": [
{
"column": "id",
@@ -2942,7 +2942,7 @@
}
]
},
- "sbtest164": {
+ "sbtest164": {
"column_vindexes": [
{
"column": "id",
@@ -2960,7 +2960,7 @@
}
]
},
- "sbtest165": {
+ "sbtest165": {
"column_vindexes": [
{
"column": "id",
@@ -2978,7 +2978,7 @@
}
]
},
- "sbtest166": {
+ "sbtest166": {
"column_vindexes": [
{
"column": "id",
@@ -2996,7 +2996,7 @@
}
]
},
- "sbtest167": {
+ "sbtest167": {
"column_vindexes": [
{
"column": "id",
@@ -3014,7 +3014,7 @@
}
]
},
- "sbtest168": {
+ "sbtest168": {
"column_vindexes": [
{
"column": "id",
@@ -3032,7 +3032,7 @@
}
]
},
- "sbtest169": {
+ "sbtest169": {
"column_vindexes": [
{
"column": "id",
@@ -3050,7 +3050,7 @@
}
]
},
- "sbtest170": {
+ "sbtest170": {
"column_vindexes": [
{
"column": "id",
@@ -3068,7 +3068,7 @@
}
]
},
- "sbtest171": {
+ "sbtest171": {
"column_vindexes": [
{
"column": "id",
@@ -3086,7 +3086,7 @@
}
]
},
- "sbtest172": {
+ "sbtest172": {
"column_vindexes": [
{
"column": "id",
@@ -3104,7 +3104,7 @@
}
]
},
- "sbtest173": {
+ "sbtest173": {
"column_vindexes": [
{
"column": "id",
@@ -3122,7 +3122,7 @@
}
]
},
- "sbtest174": {
+ "sbtest174": {
"column_vindexes": [
{
"column": "id",
@@ -3140,7 +3140,7 @@
}
]
},
- "sbtest175": {
+ "sbtest175": {
"column_vindexes": [
{
"column": "id",
@@ -3158,7 +3158,7 @@
}
]
},
- "sbtest176": {
+ "sbtest176": {
"column_vindexes": [
{
"column": "id",
@@ -3176,7 +3176,7 @@
}
]
},
- "sbtest177": {
+ "sbtest177": {
"column_vindexes": [
{
"column": "id",
@@ -3194,7 +3194,7 @@
}
]
},
- "sbtest178": {
+ "sbtest178": {
"column_vindexes": [
{
"column": "id",
@@ -3212,7 +3212,7 @@
}
]
},
- "sbtest179": {
+ "sbtest179": {
"column_vindexes": [
{
"column": "id",
@@ -3230,7 +3230,7 @@
}
]
},
- "sbtest180": {
+ "sbtest180": {
"column_vindexes": [
{
"column": "id",
@@ -3248,7 +3248,7 @@
}
]
},
- "sbtest181": {
+ "sbtest181": {
"column_vindexes": [
{
"column": "id",
@@ -3266,7 +3266,7 @@
}
]
},
- "sbtest182": {
+ "sbtest182": {
"column_vindexes": [
{
"column": "id",
@@ -3284,7 +3284,7 @@
}
]
},
- "sbtest183": {
+ "sbtest183": {
"column_vindexes": [
{
"column": "id",
@@ -3302,7 +3302,7 @@
}
]
},
- "sbtest184": {
+ "sbtest184": {
"column_vindexes": [
{
"column": "id",
@@ -3320,7 +3320,7 @@
}
]
},
- "sbtest185": {
+ "sbtest185": {
"column_vindexes": [
{
"column": "id",
@@ -3338,7 +3338,7 @@
}
]
},
- "sbtest186": {
+ "sbtest186": {
"column_vindexes": [
{
"column": "id",
@@ -3356,7 +3356,7 @@
}
]
},
- "sbtest187": {
+ "sbtest187": {
"column_vindexes": [
{
"column": "id",
@@ -3374,7 +3374,7 @@
}
]
},
- "sbtest188": {
+ "sbtest188": {
"column_vindexes": [
{
"column": "id",
@@ -3392,7 +3392,7 @@
}
]
},
- "sbtest189": {
+ "sbtest189": {
"column_vindexes": [
{
"column": "id",
@@ -3410,7 +3410,7 @@
}
]
},
- "sbtest190": {
+ "sbtest190": {
"column_vindexes": [
{
"column": "id",
@@ -3428,7 +3428,7 @@
}
]
},
- "sbtest191": {
+ "sbtest191": {
"column_vindexes": [
{
"column": "id",
@@ -3446,7 +3446,7 @@
}
]
},
- "sbtest192": {
+ "sbtest192": {
"column_vindexes": [
{
"column": "id",
@@ -3464,7 +3464,7 @@
}
]
},
- "sbtest193": {
+ "sbtest193": {
"column_vindexes": [
{
"column": "id",
@@ -3482,7 +3482,7 @@
}
]
},
- "sbtest194": {
+ "sbtest194": {
"column_vindexes": [
{
"column": "id",
@@ -3500,7 +3500,7 @@
}
]
},
- "sbtest195": {
+ "sbtest195": {
"column_vindexes": [
{
"column": "id",
@@ -3518,7 +3518,7 @@
}
]
},
- "sbtest196": {
+ "sbtest196": {
"column_vindexes": [
{
"column": "id",
@@ -3536,7 +3536,7 @@
}
]
},
- "sbtest197": {
+ "sbtest197": {
"column_vindexes": [
{
"column": "id",
@@ -3554,7 +3554,7 @@
}
]
},
- "sbtest198": {
+ "sbtest198": {
"column_vindexes": [
{
"column": "id",
@@ -3572,7 +3572,7 @@
}
]
},
- "sbtest199": {
+ "sbtest199": {
"column_vindexes": [
{
"column": "id",
@@ -3590,7 +3590,7 @@
}
]
},
- "sbtest200": {
+ "sbtest200": {
"column_vindexes": [
{
"column": "id",
@@ -3608,7 +3608,7 @@
}
]
},
- "sbtest201": {
+ "sbtest201": {
"column_vindexes": [
{
"column": "id",
@@ -3626,7 +3626,7 @@
}
]
},
- "sbtest202": {
+ "sbtest202": {
"column_vindexes": [
{
"column": "id",
@@ -3644,7 +3644,7 @@
}
]
},
- "sbtest203": {
+ "sbtest203": {
"column_vindexes": [
{
"column": "id",
@@ -3662,7 +3662,7 @@
}
]
},
- "sbtest204": {
+ "sbtest204": {
"column_vindexes": [
{
"column": "id",
@@ -3680,7 +3680,7 @@
}
]
},
- "sbtest205": {
+ "sbtest205": {
"column_vindexes": [
{
"column": "id",
@@ -3698,7 +3698,7 @@
}
]
},
- "sbtest206": {
+ "sbtest206": {
"column_vindexes": [
{
"column": "id",
@@ -3716,7 +3716,7 @@
}
]
},
- "sbtest207": {
+ "sbtest207": {
"column_vindexes": [
{
"column": "id",
@@ -3734,7 +3734,7 @@
}
]
},
- "sbtest208": {
+ "sbtest208": {
"column_vindexes": [
{
"column": "id",
@@ -3752,7 +3752,7 @@
}
]
},
- "sbtest209": {
+ "sbtest209": {
"column_vindexes": [
{
"column": "id",
@@ -3770,7 +3770,7 @@
}
]
},
- "sbtest210": {
+ "sbtest210": {
"column_vindexes": [
{
"column": "id",
@@ -3788,7 +3788,7 @@
}
]
},
- "sbtest211": {
+ "sbtest211": {
"column_vindexes": [
{
"column": "id",
@@ -3806,7 +3806,7 @@
}
]
},
- "sbtest212": {
+ "sbtest212": {
"column_vindexes": [
{
"column": "id",
@@ -3824,7 +3824,7 @@
}
]
},
- "sbtest213": {
+ "sbtest213": {
"column_vindexes": [
{
"column": "id",
@@ -3842,7 +3842,7 @@
}
]
},
- "sbtest214": {
+ "sbtest214": {
"column_vindexes": [
{
"column": "id",
@@ -3860,7 +3860,7 @@
}
]
},
- "sbtest215": {
+ "sbtest215": {
"column_vindexes": [
{
"column": "id",
@@ -3878,7 +3878,7 @@
}
]
},
- "sbtest216": {
+ "sbtest216": {
"column_vindexes": [
{
"column": "id",
@@ -3896,7 +3896,7 @@
}
]
},
- "sbtest217": {
+ "sbtest217": {
"column_vindexes": [
{
"column": "id",
@@ -3914,7 +3914,7 @@
}
]
},
- "sbtest218": {
+ "sbtest218": {
"column_vindexes": [
{
"column": "id",
@@ -3932,7 +3932,7 @@
}
]
},
- "sbtest219": {
+ "sbtest219": {
"column_vindexes": [
{
"column": "id",
@@ -3950,7 +3950,7 @@
}
]
},
- "sbtest220": {
+ "sbtest220": {
"column_vindexes": [
{
"column": "id",
@@ -3968,7 +3968,7 @@
}
]
},
- "sbtest221": {
+ "sbtest221": {
"column_vindexes": [
{
"column": "id",
@@ -3986,7 +3986,7 @@
}
]
},
- "sbtest222": {
+ "sbtest222": {
"column_vindexes": [
{
"column": "id",
@@ -4004,7 +4004,7 @@
}
]
},
- "sbtest223": {
+ "sbtest223": {
"column_vindexes": [
{
"column": "id",
@@ -4022,7 +4022,7 @@
}
]
},
- "sbtest224": {
+ "sbtest224": {
"column_vindexes": [
{
"column": "id",
@@ -4040,7 +4040,7 @@
}
]
},
- "sbtest225": {
+ "sbtest225": {
"column_vindexes": [
{
"column": "id",
@@ -4058,7 +4058,7 @@
}
]
},
- "sbtest226": {
+ "sbtest226": {
"column_vindexes": [
{
"column": "id",
@@ -4076,7 +4076,7 @@
}
]
},
- "sbtest227": {
+ "sbtest227": {
"column_vindexes": [
{
"column": "id",
@@ -4094,7 +4094,7 @@
}
]
},
- "sbtest228": {
+ "sbtest228": {
"column_vindexes": [
{
"column": "id",
@@ -4112,7 +4112,7 @@
}
]
},
- "sbtest229": {
+ "sbtest229": {
"column_vindexes": [
{
"column": "id",
@@ -4130,7 +4130,7 @@
}
]
},
- "sbtest230": {
+ "sbtest230": {
"column_vindexes": [
{
"column": "id",
@@ -4148,7 +4148,7 @@
}
]
},
- "sbtest231": {
+ "sbtest231": {
"column_vindexes": [
{
"column": "id",
@@ -4166,7 +4166,7 @@
}
]
},
- "sbtest232": {
+ "sbtest232": {
"column_vindexes": [
{
"column": "id",
@@ -4184,7 +4184,7 @@
}
]
},
- "sbtest233": {
+ "sbtest233": {
"column_vindexes": [
{
"column": "id",
@@ -4202,7 +4202,7 @@
}
]
},
- "sbtest234": {
+ "sbtest234": {
"column_vindexes": [
{
"column": "id",
@@ -4220,7 +4220,7 @@
}
]
},
- "sbtest235": {
+ "sbtest235": {
"column_vindexes": [
{
"column": "id",
@@ -4238,7 +4238,7 @@
}
]
},
- "sbtest236": {
+ "sbtest236": {
"column_vindexes": [
{
"column": "id",
@@ -4256,7 +4256,7 @@
}
]
},
- "sbtest237": {
+ "sbtest237": {
"column_vindexes": [
{
"column": "id",
@@ -4274,7 +4274,7 @@
}
]
},
- "sbtest238": {
+ "sbtest238": {
"column_vindexes": [
{
"column": "id",
@@ -4292,7 +4292,7 @@
}
]
},
- "sbtest239": {
+ "sbtest239": {
"column_vindexes": [
{
"column": "id",
@@ -4310,7 +4310,7 @@
}
]
},
- "sbtest240": {
+ "sbtest240": {
"column_vindexes": [
{
"column": "id",
@@ -4328,7 +4328,7 @@
}
]
},
- "sbtest241": {
+ "sbtest241": {
"column_vindexes": [
{
"column": "id",
@@ -4346,7 +4346,7 @@
}
]
},
- "sbtest242": {
+ "sbtest242": {
"column_vindexes": [
{
"column": "id",
@@ -4364,7 +4364,7 @@
}
]
},
- "sbtest243": {
+ "sbtest243": {
"column_vindexes": [
{
"column": "id",
@@ -4382,7 +4382,7 @@
}
]
},
- "sbtest244": {
+ "sbtest244": {
"column_vindexes": [
{
"column": "id",
@@ -4400,7 +4400,7 @@
}
]
},
- "sbtest245": {
+ "sbtest245": {
"column_vindexes": [
{
"column": "id",
@@ -4418,7 +4418,7 @@
}
]
},
- "sbtest246": {
+ "sbtest246": {
"column_vindexes": [
{
"column": "id",
@@ -4436,7 +4436,7 @@
}
]
},
- "sbtest247": {
+ "sbtest247": {
"column_vindexes": [
{
"column": "id",
@@ -4454,7 +4454,7 @@
}
]
},
- "sbtest248": {
+ "sbtest248": {
"column_vindexes": [
{
"column": "id",
@@ -4472,7 +4472,7 @@
}
]
},
- "sbtest249": {
+ "sbtest249": {
"column_vindexes": [
{
"column": "id",
@@ -4490,7 +4490,7 @@
}
]
},
- "sbtest250": {
+ "sbtest250": {
"column_vindexes": [
{
"column": "id",
diff --git a/go/vt/vtgate/planbuilder/testdata/rails_schema_test.json b/go/vt/vtgate/planbuilder/testdata/vschemas/rails_schema.json
similarity index 100%
rename from go/vt/vtgate/planbuilder/testdata/rails_schema_test.json
rename to go/vt/vtgate/planbuilder/testdata/vschemas/rails_schema.json
diff --git a/go/vt/vtgate/planbuilder/testdata/schema_test.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
similarity index 89%
rename from go/vt/vtgate/planbuilder/testdata/schema_test.json
rename to go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
index ebd0b431b68..497b3da2500 100644
--- a/go/vt/vtgate/planbuilder/testdata/schema_test.json
+++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
@@ -1,27 +1,47 @@
{
"routing_rules": {
- "rules": [{
- "from_table": "route1",
- "to_tables": ["user.user"]
- }, {
- "from_table": "route2",
- "to_tables": ["main.unsharded"]
- }, {
- "from_table": "second_user.user",
- "to_tables": ["user.user"]
- }, {
- "from_table": "second_user.foo",
- "to_tables": ["user.user"]
- }, {
- "from_table": "primary_redirect@primary",
- "to_tables": ["user.user"]
- }, {
- "from_table": "bad_table",
- "to_tables": ["noks.noks"]
- }, {
- "from_table": "disabled",
- "to_tables": []
- }]
+ "rules": [
+ {
+ "from_table": "route1",
+ "to_tables": [
+ "user.user"
+ ]
+ },
+ {
+ "from_table": "route2",
+ "to_tables": [
+ "main.unsharded"
+ ]
+ },
+ {
+ "from_table": "second_user.user",
+ "to_tables": [
+ "user.user"
+ ]
+ },
+ {
+ "from_table": "second_user.foo",
+ "to_tables": [
+ "user.user"
+ ]
+ },
+ {
+ "from_table": "primary_redirect@primary",
+ "to_tables": [
+ "user.user"
+ ]
+ },
+ {
+ "from_table": "bad_table",
+ "to_tables": [
+ "noks.noks"
+ ]
+ },
+ {
+ "from_table": "disabled",
+ "to_tables": []
+ }
+ ]
},
"keyspaces": {
"user": {
@@ -247,7 +267,10 @@
"name": "cola_map"
},
{
- "columns": ["column_b", "column_c"],
+ "columns": [
+ "column_b",
+ "column_c"
+ ],
"name": "colb_colc_map"
}
]
@@ -259,7 +282,10 @@
"name": "kid_index"
},
{
- "columns": ["column_a", "kid"],
+ "columns": [
+ "column_a",
+ "kid"
+ ],
"name": "cola_kid_map"
}
]
@@ -311,7 +337,10 @@
"multicol_tbl": {
"column_vindexes": [
{
- "columns": ["cola", "colb"],
+ "columns": [
+ "cola",
+ "colb"
+ ],
"name": "multicolIdx"
},
{
@@ -430,6 +459,9 @@
},
"seq": {
"type": "sequence"
+ },
+ "unsharded_ref": {
+ "type": "reference"
}
}
},
diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_schema_test.json b/go/vt/vtgate/planbuilder/testdata/vschemas/tpcc_schema.json
similarity index 100%
rename from go/vt/vtgate/planbuilder/testdata/tpcc_schema_test.json
rename to go/vt/vtgate/planbuilder/testdata/vschemas/tpcc_schema.json
diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_schema_test.json b/go/vt/vtgate/planbuilder/testdata/vschemas/tpch_schema.json
similarity index 100%
rename from go/vt/vtgate/planbuilder/testdata/tpch_schema_test.json
rename to go/vt/vtgate/planbuilder/testdata/vschemas/tpch_schema.json
diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json
new file mode 100644
index 00000000000..30cc2d031ee
--- /dev/null
+++ b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json
@@ -0,0 +1,1717 @@
+[
+ {
+ "comment": "join on having clause",
+ "query": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:0,R:1",
+ "JoinVars": {
+ "uid": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1",
+ "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:2",
+ "JoinVars": {
+ "e_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id, e.col, e.id as eid from user_extra as e where 1 != 1",
+ "Query": "select e.id, e.col, e.id as eid from user_extra as e",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u where u.id = :e_id",
+ "Table": "`user`",
+ "Values": [
+ ":e_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "bind var already in use",
+ "query": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:0,R:1",
+ "JoinVars": {
+ "uid1": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1",
+ "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid1 and e.col = :uid",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:2",
+ "JoinVars": {
+ "e_id": 0
+ },
+ "TableName": "user_extra_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id, e.col, e.id as eid from user_extra as e where 1 != 1",
+ "Query": "select e.id, e.col, e.id as eid from user_extra as e where e.col = :uid",
+ "Table": "user_extra"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
+ "Query": "select u.id as uid from `user` as u where u.id = :e_id",
+ "Table": "`user`",
+ "Values": [
+ ":e_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up join with join, going left",
+ "query": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u1_col": 1
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.id, u1.col from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.col = :u1_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u1_col": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.col, u1.id from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.col = :u1_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up join with join, going left, then right",
+ "query": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u2_col": 1
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.id from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
+ "Query": "select u2.col from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.col = :u2_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.id from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinVars": {
+ "u2_col": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
+ "Query": "select u2.col from `user` as u2",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.col = :u2_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up join with join, reuse existing result from a lower join",
+ "query": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u1_col": 1
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "u1_col": 1
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.id, u1.col from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.col = :u1_col",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.col = :u1_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "u3_col": 0
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u3.col from `user` as u3 where 1 != 1",
+ "Query": "select u3.col from `user` as u3",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u1_col": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.col, u1.id from `user` as u1 where u1.col = :u3_col",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.col = :u1_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up join with join, reuse existing result from a lower join.\n# You need two levels of join nesting to test this: when u3 requests\n# col from u1, the u1-u2 joins exports the column to u2-u3. When\n# u4 requests it, it should be reused from the u1-u2 join.",
+ "query": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u1_col": 1
+ },
+ "TableName": "`user`_`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "u1_col": 1
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.id, u1.col from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
+ "Table": "`user`",
+ "Values": [
+ ":u1_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u4 where 1 != 1",
+ "Query": "select 1 from `user` as u4 where u4.col = :u1_col",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "TableName": "`user`_`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "u4_col": 0
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u4.col from `user` as u4 where 1 != 1",
+ "Query": "select u4.col from `user` as u4",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u1_col": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.col, u1.id from `user` as u1 where u1.col = :u4_col",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
+ "Table": "`user`",
+ "Values": [
+ ":u1_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Test reuse of join var already being supplied to the right of a node.",
+ "query": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "u1_col": 1
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
+ "Query": "select u1.id, u1.col from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = :u1_col",
+ "Table": "`user`",
+ "Values": [
+ ":u1_col"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
+ "Table": "`user`",
+ "Values": [
+ ":u1_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "u1_col": 0
+ },
+ "TableName": "`user`_`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,L:1",
+ "JoinVars": {
+ "u1_col": 0
+ },
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.col, u1.id from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = :u1_col",
+ "Table": "`user`",
+ "Values": [
+ ":u1_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
+ "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
+ "Table": "`user`",
+ "Values": [
+ ":u1_col"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Join on weird columns.",
+ "query": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "weird_name_a_b_c": 1
+ },
+ "TableName": "`weird``name`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name` where 1 != 1",
+ "Query": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name`",
+ "Table": "`weird``name`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.b from unsharded where 1 != 1",
+ "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0,L:1",
+ "JoinVars": {
+ "unsharded_id": 0
+ },
+ "TableName": "unsharded_`weird``name`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id, unsharded.b from unsharded where 1 != 1",
+ "Query": "select unsharded.id, unsharded.b from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `weird``name`.a from `weird``name` where 1 != 1",
+ "Query": "select `weird``name`.a from `weird``name` where `weird``name`.`a``b*c` = :unsharded_id",
+ "Table": "`weird``name`",
+ "Values": [
+ ":unsharded_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.weird`name"
+ ]
+ }
+ },
+ {
+ "comment": "Join on weird column (col is not in select)",
+ "query": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "R:0",
+ "JoinVars": {
+ "weird_name_a_b_c": 0
+ },
+ "TableName": "`weird``name`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `weird``name`.`a``b*c` from `weird``name` where 1 != 1",
+ "Query": "select `weird``name`.`a``b*c` from `weird``name`",
+ "Table": "`weird``name`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.b from unsharded where 1 != 1",
+ "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c",
+ "Table": "unsharded"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1",
+ "JoinVars": {
+ "unsharded_id": 0
+ },
+ "TableName": "unsharded_`weird``name`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select unsharded.id, unsharded.b from unsharded where 1 != 1",
+ "Query": "select unsharded.id, unsharded.b from unsharded",
+ "Table": "unsharded"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `weird``name` where 1 != 1",
+ "Query": "select 1 from `weird``name` where `weird``name`.`a``b*c` = :unsharded_id",
+ "Table": "`weird``name`",
+ "Values": [
+ ":unsharded_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.weird`name"
+ ]
+ }
+ },
+ {
+ "comment": "wire-up with limit primitive",
+ "query": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_col": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
+ "Query": "select u.id, u.col from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.id = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
+ "Query": "select u.col, u.id from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.id = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Wire-up in subquery",
+ "query": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0",
+ "JoinVars": {
+ "u_col": 1
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
+ "Query": "select u.id, u.col from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.id = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)",
+ "Instructions": {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutIn",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
+ "Query": "select u.col, u.id from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.id = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ ":__sq1"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Wire-up in underlying primitive after pullout",
+ "query": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq_has_values1",
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,L:1",
+ "JoinVars": {
+ "u_col": 2
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.id, :__sq1, u.col from `user` as u where 1 != 1",
+ "Query": "select u.id, :__sq1, u.col from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.id = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10",
+ "Instructions": {
+ "OperatorType": "Limit",
+ "Count": "INT64(10)",
+ "Inputs": [
+ {
+ "OperatorType": "Subquery",
+ "Variant": "PulloutValue",
+ "PulloutVars": [
+ "__sq1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select col from `user` where 1 != 1",
+ "Query": "select col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:1,R:0,L:2",
+ "JoinVars": {
+ "u_col": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.col, u.id, :__sq1 from `user` as u where 1 != 1",
+ "Query": "select u.col, u.id, :__sq1 from `user` as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select e.id from user_extra as e where 1 != 1",
+ "Query": "select e.id from user_extra as e where e.id = :u_col",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
+ {
+ "comment": "Invalid value in IN clause",
+ "query": "select id from user where id in (18446744073709551616, 1)",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (18446744073709551616, 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(DECIMAL(18446744073709551616), INT64(1))"
+ ],
+ "Vindex": "user_index"
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select id from user where id in (18446744073709551616, 1)",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select id from `user` where 1 != 1",
+ "Query": "select id from `user` where id in ::__vals",
+ "Table": "`user`",
+ "Values": [
+ "(DECIMAL(18446744073709551616), INT64(1))"
+ ],
+ "Vindex": "user_index"
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Invalid value in IN clause from LHS of join",
+ "query": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.id from `user` as u1 where u1.id = 18446744073709551616",
+ "Table": "`user`",
+ "Values": [
+ "DECIMAL(18446744073709551616)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.id from `user` as u1 where u1.id = 18446744073709551616",
+ "Table": "`user`",
+ "Values": [
+ "DECIMAL(18446744073709551616)"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "Invalid value in IN clause from RHS of join",
+ "query": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616",
+ "v3-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.id from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = 18446744073709551616",
+ "Table": "`user`",
+ "Values": [
+ "DECIMAL(18446744073709551616)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ }
+ },
+ "gen4-plan": {
+ "QueryType": "SELECT",
+ "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
+ "Query": "select u1.id from `user` as u1",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
+ "Query": "select 1 from `user` as u2 where u2.id = 18446744073709551616",
+ "Table": "`user`",
+ "Values": [
+ "DECIMAL(18446744073709551616)"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with column aliases not supported by v3, but planner is overridden with hint",
+ "query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "Instructions": {
+ "OperatorType": "VindexLookup",
+ "Variant": "Equal",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "Values": [
+ "INT64(1)"
+ ],
+ "Vindex": "name_user_map",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "IN",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
+ "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
+ "Table": "name_user_vdx",
+ "Values": [
+ ":name"
+ ],
+ "Vindex": "user_index"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "ByDestination",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
+ "Query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)",
+ "Table": "`user`"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user"
+ ]
+ }
+ },
+ {
+ "comment": "derived table with column aliases not supported by v3, but planner is overridden with hint",
+ "query": "select /*vt+ PLANNER=v3 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1",
+ "plan": "unsupported: column aliases in derived table"
+ },
+ {
+ "comment": "Three-way join using the left2right. The normal gen4 planner would merge m1 and m2 first, but the left-to-right planner doesn't",
+ "query": "select /*vt+ PLANNER=left2right */ user.col from user join unsharded as m1 join unsharded as m2",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select /*vt+ PLANNER=left2right */ user.col from user join unsharded as m1 join unsharded as m2",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "TableName": "`user`_unsharded",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.col from `user` where 1 != 1",
+ "Query": "select /*vt+ PLANNER=left2right */ `user`.col from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
+ "Query": "select /*vt+ PLANNER=left2right */ 1 from unsharded as m1",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Unsharded",
+ "Keyspace": {
+ "Name": "main",
+ "Sharded": false
+ },
+ "FieldQuery": "select 1 from unsharded as m2 where 1 != 1",
+ "Query": "select /*vt+ PLANNER=left2right */ 1 from unsharded as m2",
+ "Table": "unsharded"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "main.unsharded",
+ "user.user"
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt b/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt
deleted file mode 100644
index becba8162f4..00000000000
--- a/go/vt/vtgate/planbuilder/testdata/wireup_cases.txt
+++ /dev/null
@@ -1,1702 +0,0 @@
-# join on having clause
-"select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid"
-{
- "QueryType": "SELECT",
- "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:0,R:1",
- "JoinVars": {
- "uid": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1",
- "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:2",
- "JoinVars": {
- "e_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id, e.col, e.id as eid from user_extra as e where 1 != 1",
- "Query": "select e.id, e.col, e.id as eid from user_extra as e",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u where u.id = :e_id",
- "Table": "`user`",
- "Values": [
- ":e_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# bind var already in use
-"select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid"
-{
- "QueryType": "SELECT",
- "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:0,R:1",
- "JoinVars": {
- "uid1": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1",
- "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid1 and e.col = :uid",
- "Table": "user_extra"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:2",
- "JoinVars": {
- "e_id": 0
- },
- "TableName": "user_extra_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id, e.col, e.id as eid from user_extra as e where 1 != 1",
- "Query": "select e.id, e.col, e.id as eid from user_extra as e where e.col = :uid",
- "Table": "user_extra"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id as uid from `user` as u where 1 != 1",
- "Query": "select u.id as uid from `user` as u where u.id = :e_id",
- "Table": "`user`",
- "Values": [
- ":e_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# wire-up join with join, going left
-"select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u1_col": 1
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.id, u1.col from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.col = :u1_col",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u1_col": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.col, u1.id from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.col = :u1_col",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# wire-up join with join, going left, then right
-"select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u2_col": 1
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.id from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
- "Query": "select u2.col from `user` as u2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.col = :u2_col",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.id from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinVars": {
- "u2_col": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u2.col from `user` as u2 where 1 != 1",
- "Query": "select u2.col from `user` as u2",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.col = :u2_col",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# wire-up join with join, reuse existing result from a lower join
-"select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u1_col": 1
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "u1_col": 1
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.id, u1.col from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.col = :u1_col",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.col = :u1_col",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "u3_col": 0
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u3.col from `user` as u3 where 1 != 1",
- "Query": "select u3.col from `user` as u3",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u1_col": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.col, u1.id from `user` as u1 where u1.col = :u3_col",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.col = :u1_col",
- "Table": "`user`"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# wire-up join with join, reuse existing result from a lower join.
-# You need two levels of join nesting to test this: when u3 requests
-# col from u1, the u1-u2 joins exports the column to u2-u3. When
-# u4 requests it, it should be reused from the u1-u2 join.
-"select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u1_col": 1
- },
- "TableName": "`user`_`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "u1_col": 1
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.id, u1.col from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2",
- "Table": "`user`"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
- "Table": "`user`",
- "Values": [
- ":u1_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u4 where 1 != 1",
- "Query": "select 1 from `user` as u4 where u4.col = :u1_col",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "TableName": "`user`_`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "u4_col": 0
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u4.col from `user` as u4 where 1 != 1",
- "Query": "select u4.col from `user` as u4",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u1_col": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.col, u1.id from `user` as u1 where u1.col = :u4_col",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
- "Table": "`user`",
- "Values": [
- ":u1_col"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Test reuse of join var already being supplied to the right of a node.
-"select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "JoinVars": {
- "u1_col": 1
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1",
- "Query": "select u1.id, u1.col from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = :u1_col",
- "Table": "`user`",
- "Values": [
- ":u1_col"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
- "Table": "`user`",
- "Values": [
- ":u1_col"
- ],
- "Vindex": "user_index"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "u1_col": 0
- },
- "TableName": "`user`_`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,L:1",
- "JoinVars": {
- "u1_col": 0
- },
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.col, u1.id from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = :u1_col",
- "Table": "`user`",
- "Values": [
- ":u1_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u3 where 1 != 1",
- "Query": "select 1 from `user` as u3 where u3.id = :u1_col",
- "Table": "`user`",
- "Values": [
- ":u1_col"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Join on weird columns.
-"select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id"
-{
- "QueryType": "SELECT",
- "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "weird_name_a_b_c": 1
- },
- "TableName": "`weird``name`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name` where 1 != 1",
- "Query": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name`",
- "Table": "`weird``name`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.b from unsharded where 1 != 1",
- "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0,L:1",
- "JoinVars": {
- "unsharded_id": 0
- },
- "TableName": "unsharded_`weird``name`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id, unsharded.b from unsharded where 1 != 1",
- "Query": "select unsharded.id, unsharded.b from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `weird``name`.a from `weird``name` where 1 != 1",
- "Query": "select `weird``name`.a from `weird``name` where `weird``name`.`a``b*c` = :unsharded_id",
- "Table": "`weird``name`",
- "Values": [
- ":unsharded_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.weird`name"
- ]
-}
-
-# Join on weird column (col is not in select)
-"select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id"
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "R:0",
- "JoinVars": {
- "weird_name_a_b_c": 0
- },
- "TableName": "`weird``name`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `weird``name`.`a``b*c` from `weird``name` where 1 != 1",
- "Query": "select `weird``name`.`a``b*c` from `weird``name`",
- "Table": "`weird``name`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.b from unsharded where 1 != 1",
- "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c",
- "Table": "unsharded"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1",
- "JoinVars": {
- "unsharded_id": 0
- },
- "TableName": "unsharded_`weird``name`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select unsharded.id, unsharded.b from unsharded where 1 != 1",
- "Query": "select unsharded.id, unsharded.b from unsharded",
- "Table": "unsharded"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `weird``name` where 1 != 1",
- "Query": "select 1 from `weird``name` where `weird``name`.`a``b*c` = :unsharded_id",
- "Table": "`weird``name`",
- "Values": [
- ":unsharded_id"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.weird`name"
- ]
-}
-
-# wire-up with limit primitive
-"select u.id, e.id from user u join user_extra e where e.id = u.col limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_col": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
- "Query": "select u.id, u.col from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.id = :u_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "u_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
- "Query": "select u.col, u.id from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.id = :u_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Wire-up in subquery
-"select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)"
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0",
- "JoinVars": {
- "u_col": 1
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1",
- "Query": "select u.id, u.col from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.id = :u_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)",
- "Instructions": {
- "OperatorType": "Subquery",
- "Variant": "PulloutIn",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0",
- "JoinVars": {
- "u_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1",
- "Query": "select u.col, u.id from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.id = :u_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` where 1 != 1",
- "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals",
- "Table": "`user`",
- "Values": [
- ":__sq1"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Wire-up in underlying primitive after pullout
-"select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10"
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq_has_values1",
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0,R:0,L:1",
- "JoinVars": {
- "u_col": 2
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, :__sq1, u.col from `user` as u where 1 != 1",
- "Query": "select u.id, :__sq1, u.col from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.id = :u_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10",
- "Instructions": {
- "OperatorType": "Limit",
- "Count": "INT64(10)",
- "Inputs": [
- {
- "OperatorType": "Subquery",
- "Variant": "PulloutValue",
- "PulloutVars": [
- "__sq1"
- ],
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select col from `user` where 1 != 1",
- "Query": "select col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:1,R:0,L:2",
- "JoinVars": {
- "u_col": 0
- },
- "TableName": "`user`_user_extra",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.col, u.id, :__sq1 from `user` as u where 1 != 1",
- "Query": "select u.col, u.id, :__sq1 from `user` as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select e.id from user_extra as e where 1 != 1",
- "Query": "select e.id from user_extra as e where e.id = :u_col",
- "Table": "user_extra"
- }
- ]
- }
- ]
- }
- ]
- },
- "TablesUsed": [
- "user.user",
- "user.user_extra"
- ]
-}
-
-# Invalid value in IN clause
-"select id from user where id in (18446744073709551616, 1)"
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (18446744073709551616, 1)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(DECIMAL(18446744073709551616), INT64(1))"
- ],
- "Vindex": "user_index"
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select id from user where id in (18446744073709551616, 1)",
- "Instructions": {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select id from `user` where 1 != 1",
- "Query": "select id from `user` where id in ::__vals",
- "Table": "`user`",
- "Values": [
- "(DECIMAL(18446744073709551616), INT64(1))"
- ],
- "Vindex": "user_index"
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Invalid value in IN clause from LHS of join
-"select u1.id from user u1 join user u2 where u1.id = 18446744073709551616"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.id from `user` as u1 where u1.id = 18446744073709551616",
- "Table": "`user`",
- "Values": [
- "DECIMAL(18446744073709551616)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2",
- "Table": "`user`"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.id from `user` as u1 where u1.id = 18446744073709551616",
- "Table": "`user`",
- "Values": [
- "DECIMAL(18446744073709551616)"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# Invalid value in IN clause from RHS of join
-"select u1.id from user u1 join user u2 where u2.id = 18446744073709551616"
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.id from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 18446744073709551616",
- "Table": "`user`",
- "Values": [
- "DECIMAL(18446744073709551616)"
- ],
- "Vindex": "user_index"
- }
- ]
- }
-}
-{
- "QueryType": "SELECT",
- "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_`user`",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u1.id from `user` as u1 where 1 != 1",
- "Query": "select u1.id from `user` as u1",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "EqualUnique",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select 1 from `user` as u2 where 1 != 1",
- "Query": "select 1 from `user` as u2 where u2.id = 18446744073709551616",
- "Table": "`user`",
- "Values": [
- "DECIMAL(18446744073709551616)"
- ],
- "Vindex": "user_index"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-
-# derived table with column aliases not supported by v3, but planner is overridden with hint
-"select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1",
- "Instructions": {
- "OperatorType": "VindexLookup",
- "Variant": "Equal",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "Values": [
- "INT64(1)"
- ],
- "Vindex": "name_user_map",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "IN",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1",
- "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals",
- "Table": "name_user_vdx",
- "Values": [
- ":name"
- ],
- "Vindex": "user_index"
- },
- {
- "OperatorType": "Route",
- "Variant": "ByDestination",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1",
- "Query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)",
- "Table": "`user`"
- }
- ]
- },
- "TablesUsed": [
- "user.user"
- ]
-}
-Gen4 plan same as above
-
-# derived table with column aliases not supported by v3, but planner is overridden with hint
-"select /*vt+ PLANNER=v3 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1"
-"unsupported: column aliases in derived table"
-Gen4 plan same as above
-
-# Three-way join using the left2right. The normal gen4 planner would merge m1 and m2 first, but the left to right doesnt
-"select /*vt+ PLANNER=left2right */ user.col from user join unsharded as m1 join unsharded as m2"
-{
- "QueryType": "SELECT",
- "Original": "select /*vt+ PLANNER=left2right */ user.col from user join unsharded as m1 join unsharded as m2",
- "Instructions": {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded_unsharded",
- "Inputs": [
- {
- "OperatorType": "Join",
- "Variant": "Join",
- "JoinColumnIndexes": "L:0",
- "TableName": "`user`_unsharded",
- "Inputs": [
- {
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select `user`.col from `user` where 1 != 1",
- "Query": "select /*vt+ PLANNER=left2right */ `user`.col from `user`",
- "Table": "`user`"
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m1 where 1 != 1",
- "Query": "select /*vt+ PLANNER=left2right */ 1 from unsharded as m1",
- "Table": "unsharded"
- }
- ]
- },
- {
- "OperatorType": "Route",
- "Variant": "Unsharded",
- "Keyspace": {
- "Name": "main",
- "Sharded": false
- },
- "FieldQuery": "select 1 from unsharded as m2 where 1 != 1",
- "Query": "select /*vt+ PLANNER=left2right */ 1 from unsharded as m2",
- "Table": "unsharded"
- }
- ]
- },
- "TablesUsed": [
- "main.unsharded",
- "user.user"
- ]
-}
-Gen4 plan same as above
diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go
index e710df27755..af2509b94e7 100644
--- a/go/vt/vtgate/safe_session.go
+++ b/go/vt/vtgate/safe_session.go
@@ -151,11 +151,22 @@ func (session *SafeSession) ResetTx() {
session.Session.InTransaction = false
session.commitOrder = vtgatepb.CommitOrder_NORMAL
session.Savepoints = nil
- if !session.Session.InReservedConn {
- session.ShardSessions = nil
- session.PreSessions = nil
- session.PostSessions = nil
+	// If the settings pool is enabled on the vttablet, this variable will be
+	// true even when no shard session holds a reserved connection id.
+	// So, we should check the shard sessions and not just this variable.
+ if session.Session.InReservedConn {
+ allSessions := append(session.ShardSessions, append(session.PreSessions, session.PostSessions...)...)
+ for _, ss := range allSessions {
+ if ss.ReservedId != 0 {
+ // found that reserved connection exists.
+ // abort here, we should keep the shard sessions.
+ return
+ }
+ }
}
+ session.ShardSessions = nil
+ session.PreSessions = nil
+ session.PostSessions = nil
}
// Reset clears the session
diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go
index 403148bb2e8..680efac82f1 100644
--- a/go/vt/vtgate/schema/tracker.go
+++ b/go/vt/vtgate/schema/tracker.go
@@ -21,21 +21,19 @@ import (
"sync"
"time"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/vterrors"
-
- "vitess.io/vitess/go/vt/callerid"
-
- "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "golang.org/x/exp/maps"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
- querypb "vitess.io/vitess/go/vt/proto/query"
-
+ "vitess.io/vitess/go/vt/callerid"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/vindexes"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
)
type (
@@ -110,6 +108,10 @@ func (t *Tracker) Start() {
for {
select {
case th := <-t.ch:
+ if th == nil {
+ // channel closed
+ return
+ }
ksUpdater := t.getKeyspaceUpdateController(th)
ksUpdater.add(th)
case <-ctx.Done():
@@ -175,7 +177,7 @@ func (t *Tracker) Tables(ks string) map[string][]vindexes.Column {
return map[string][]vindexes.Column{} // we know nothing about this KS, so that is the info we can give out
}
- return m
+ return maps.Clone(m)
}
func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool {
@@ -275,7 +277,7 @@ func (tm *tableMap) delete(ks, tbl string) {
delete(m, tbl)
}
-// This empties out any previous schema for for all tables in a keyspace.
+// This empties out any previous schema for all tables in a keyspace.
// You should call this before initializing/loading a keyspace of the same
// name in the cache.
func (t *Tracker) clearKeyspaceTables(ks string) {
diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go
index ba30f0392c0..c68952143a3 100644
--- a/go/vt/vtgate/semantics/analyzer.go
+++ b/go/vt/vtgate/semantics/analyzer.go
@@ -63,7 +63,7 @@ func newAnalyzer(dbName string, si SchemaInformation) *analyzer {
// Analyze analyzes the parsed query.
func Analyze(statement sqlparser.Statement, currentDb string, si SchemaInformation) (*SemTable, error) {
- analyzer := newAnalyzer(currentDb, si)
+ analyzer := newAnalyzer(currentDb, newSchemaInfo(si))
// Analysis for initial scope
err := analyzer.analyze(statement)
@@ -105,7 +105,7 @@ func (a *analyzer) setError(err error) {
switch err := err.(type) {
case ProjError:
a.projErr = err.Inner
- case UnshardedError:
+ case ShardedError:
a.unshardedErr = err.Inner
default:
if a.inProjection > 0 && vterrors.ErrState(err) == vterrors.NonUniqError {
@@ -256,11 +256,11 @@ func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error {
switch node := cursor.Node().(type) {
case *sqlparser.Update:
if len(node.TableExprs) != 1 {
- return UnshardedError{Inner: vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multiple tables in update")}
+ return ShardedError{Inner: vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multiple tables in update")}
}
alias, isAlias := node.TableExprs[0].(*sqlparser.AliasedTableExpr)
if !isAlias {
- return UnshardedError{Inner: vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multiple tables in update")}
+ return ShardedError{Inner: vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: multiple tables in update")}
}
_, isDerived := alias.Expr.(*sqlparser.DerivedTable)
if isDerived {
@@ -353,12 +353,12 @@ func (p ProjError) Error() string {
return p.Inner.Error()
}
-// UnshardedError is used to mark an error as something that should only be returned
+// ShardedError is used to mark an error as something that should only be returned
// if the query is not unsharded
-type UnshardedError struct {
+type ShardedError struct {
Inner error
}
-func (p UnshardedError) Error() string {
+func (p ShardedError) Error() string {
return p.Inner.Error()
}
diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go
index f7bfe3b04f9..5b02fcc493d 100644
--- a/go/vt/vtgate/semantics/analyzer_test.go
+++ b/go/vt/vtgate/semantics/analyzer_test.go
@@ -75,6 +75,17 @@ func TestBindingSingleTablePositive(t *testing.T) {
}
}
+func TestInformationSchemaColumnInfo(t *testing.T) {
+ stmt, semTable := parseAndAnalyze(t, "select table_comment, file_name from information_schema.`TABLES`, information_schema.`FILES`", "d")
+
+ sel, _ := stmt.(*sqlparser.Select)
+ tables := SingleTableSet(0)
+ files := SingleTableSet(1)
+
+ assert.Equal(t, tables, semTable.RecursiveDeps(extract(sel, 0)))
+ assert.Equal(t, files, semTable.DirectDeps(extract(sel, 1)))
+}
+
func TestBindingSingleAliasedTablePositive(t *testing.T) {
queries := []string{
"select col from tabl as X",
@@ -113,10 +124,10 @@ func TestBindingSingleTableNegative(t *testing.T) {
t.Run(query, func(t *testing.T) {
parse, err := sqlparser.Parse(query)
require.NoError(t, err)
- _, err = Analyze(parse.(sqlparser.SelectStatement), "d", &FakeSI{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "symbol")
- require.Contains(t, err.Error(), "not found")
+ st, err := Analyze(parse.(sqlparser.SelectStatement), "d", &FakeSI{})
+ require.NoError(t, err)
+ require.ErrorContains(t, st.NotUnshardedErr, "symbol")
+ require.ErrorContains(t, st.NotUnshardedErr, "not found")
})
}
}
@@ -133,12 +144,13 @@ func TestBindingSingleAliasedTableNegative(t *testing.T) {
t.Run(query, func(t *testing.T) {
parse, err := sqlparser.Parse(query)
require.NoError(t, err)
- _, err = Analyze(parse.(sqlparser.SelectStatement), "", &FakeSI{
+ st, err := Analyze(parse.(sqlparser.SelectStatement), "", &FakeSI{
Tables: map[string]*vindexes.Table{
"t": {Name: sqlparser.NewIdentifierCS("t")},
},
})
- require.Error(t, err)
+ require.NoError(t, err)
+ require.Error(t, st.NotUnshardedErr)
})
}
}
@@ -299,16 +311,16 @@ func TestMissingTable(t *testing.T) {
for _, query := range queries {
t.Run(query, func(t *testing.T) {
parse, _ := sqlparser.Parse(query)
- _, err := Analyze(parse.(sqlparser.SelectStatement), "", &FakeSI{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "symbol t.col not found")
+ st, err := Analyze(parse.(sqlparser.SelectStatement), "", &FakeSI{})
+ require.NoError(t, err)
+ require.ErrorContains(t, st.NotUnshardedErr, "symbol t.col not found")
})
}
}
func TestUnknownColumnMap2(t *testing.T) {
varchar := querypb.Type_VARCHAR
- int := querypb.Type_INT32
+ integer := querypb.Type_INT32
authoritativeTblA := vindexes.Table{
Name: sqlparser.NewIdentifierCS("a"),
@@ -334,7 +346,7 @@ func TestUnknownColumnMap2(t *testing.T) {
Name: sqlparser.NewIdentifierCS("a"),
Columns: []vindexes.Column{{
Name: sqlparser.NewIdentifierCI("col"),
- Type: int,
+ Type: integer,
}},
ColumnListAuthoritative: true,
}
@@ -342,7 +354,7 @@ func TestUnknownColumnMap2(t *testing.T) {
Name: sqlparser.NewIdentifierCS("b"),
Columns: []vindexes.Column{{
Name: sqlparser.NewIdentifierCI("col"),
- Type: int,
+ Type: integer,
}},
ColumnListAuthoritative: true,
}
@@ -379,7 +391,7 @@ func TestUnknownColumnMap2(t *testing.T) {
name: "authoritative columns",
schema: map[string]*vindexes.Table{"a": &authoritativeTblA, "b": &authoritativeTblBWithInt},
err: false,
- typ: &int,
+ typ: &integer,
}, {
name: "authoritative columns with overlap",
schema: map[string]*vindexes.Table{"a": &authoritativeTblAWithConflict, "b": &authoritativeTblB},
@@ -459,16 +471,13 @@ func TestScoping(t *testing.T) {
t.Run(query.query, func(t *testing.T) {
parse, err := sqlparser.Parse(query.query)
require.NoError(t, err)
- _, err = Analyze(parse.(sqlparser.SelectStatement), "user", &FakeSI{
+ st, err := Analyze(parse.(sqlparser.SelectStatement), "user", &FakeSI{
Tables: map[string]*vindexes.Table{
"t": {Name: sqlparser.NewIdentifierCS("t")},
},
})
- if query.errorMessage == "" {
- require.NoError(t, err)
- } else {
- require.EqualError(t, err, query.errorMessage)
- }
+ require.NoError(t, err)
+ require.EqualError(t, st.NotUnshardedErr, query.errorMessage)
})
}
}
@@ -860,8 +869,9 @@ func TestUnionOrderByRewrite(t *testing.T) {
func TestInvalidQueries(t *testing.T) {
tcases := []struct {
- sql string
- err string
+ sql string
+ err string
+ shardedErr string
}{{
sql: "select t1.id, t1.col1 from t1 union select t2.uid from t2",
err: "The used SELECT statements have a different number of columns",
@@ -889,15 +899,37 @@ func TestInvalidQueries(t *testing.T) {
}, {
sql: "select (select sql_calc_found_rows id from a) as t",
err: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+ }, {
+ sql: "select id from t1 natural join t2",
+ err: "unsupported: natural join",
+ }, {
+ sql: "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)",
+ err: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'",
+ }, {
+ sql: "select is_free_lock('xyz') from user",
+ err: "is_free_lock('xyz') allowed only with dual",
+ }, {
+ sql: "SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt",
+ err: "unsupported: json_table expressions",
+ }, {
+ sql: "select does_not_exist from t1",
+ shardedErr: "symbol does_not_exist not found",
+ }, {
+ sql: "select t1.does_not_exist from t1, t2",
+ shardedErr: "symbol t1.does_not_exist not found",
}}
for _, tc := range tcases {
t.Run(tc.sql, func(t *testing.T) {
parse, err := sqlparser.Parse(tc.sql)
require.NoError(t, err)
- _, err = Analyze(parse.(sqlparser.SelectStatement), "dbName", fakeSchemaInfo())
- require.Error(t, err)
- require.Equal(t, tc.err, err.Error())
+ st, err := Analyze(parse.(sqlparser.SelectStatement), "dbName", fakeSchemaInfo())
+ if tc.err != "" {
+ require.EqualError(t, err, tc.err)
+ } else {
+ require.NoError(t, err, tc.err)
+ require.EqualError(t, st.NotUnshardedErr, tc.shardedErr)
+ }
})
}
}
@@ -1010,9 +1042,13 @@ func TestScopingWDerivedTables(t *testing.T) {
"t": {Name: sqlparser.NewIdentifierCS("t")},
},
})
- if query.errorMessage != "" {
+
+ switch {
+ case query.errorMessage != "" && err != nil:
require.EqualError(t, err, query.errorMessage)
- } else {
+ case query.errorMessage != "":
+ require.EqualError(t, st.NotUnshardedErr, query.errorMessage)
+ default:
require.NoError(t, err)
sel := parse.(*sqlparser.Select)
assert.Equal(t, query.recursiveExpectation, st.RecursiveDeps(extract(sel, 0)), "RecursiveDeps")
@@ -1398,6 +1434,28 @@ func TestSingleUnshardedKeyspace(t *testing.T) {
}
}
+// TestScopingSubQueryJoinClause tests the scoping behavior of a subquery containing a join clause.
+// The test ensures that the scoping analysis correctly identifies and handles the relationships
+// between the tables involved in the join operation with the outer query.
+func TestScopingSubQueryJoinClause(t *testing.T) {
+ query := "select (select 1 from u1 join u2 on u1.id = u2.id and u2.id = u3.id) x from u3"
+
+ parse, err := sqlparser.Parse(query)
+ require.NoError(t, err)
+
+ st, err := Analyze(parse, "user", &FakeSI{
+ Tables: map[string]*vindexes.Table{
+ "t": {Name: sqlparser.NewIdentifierCS("t")},
+ },
+ })
+ require.NoError(t, err)
+ require.NoError(t, st.NotUnshardedErr)
+
+ tb := st.DirectDeps(parse.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.Subquery).Select.(*sqlparser.Select).From[0].(*sqlparser.JoinTableExpr).Condition.On)
+ require.Equal(t, 3, tb.NumberOfTables())
+
+}
+
var ks1 = &vindexes.Keyspace{
Name: "ks1",
Sharded: false,
diff --git a/go/vt/vtgate/semantics/binder.go b/go/vt/vtgate/semantics/binder.go
index 543aa53539e..8d1e2c434f6 100644
--- a/go/vt/vtgate/semantics/binder.go
+++ b/go/vt/vtgate/semantics/binder.go
@@ -94,7 +94,7 @@ func (b *binder) up(cursor *sqlparser.Cursor) error {
currentScope := b.scoper.currentScope()
deps, err := b.resolveColumn(node, currentScope, false)
if err != nil {
- if deps.direct.NumberOfTables() == 0 ||
+ if deps.direct.IsEmpty() ||
!strings.HasSuffix(err.Error(), "is ambiguous") ||
!b.canRewriteUsingJoin(deps, node) {
return err
@@ -127,14 +127,14 @@ func (b *binder) bindCountStar(node *sqlparser.CountStar) {
case *vTableInfo:
for _, col := range tbl.cols {
if sqlparser.EqualsExpr(node, col) {
- ts.MergeInPlace(b.recursive[col])
+ ts = ts.Merge(b.recursive[col])
}
}
default:
expr := tbl.getExpr()
if expr != nil {
setFor := b.tc.tableSetFor(expr)
- ts.MergeInPlace(setFor)
+ ts = ts.Merge(setFor)
}
}
}
@@ -196,15 +196,13 @@ func (b *binder) setSubQueryDependencies(subq *sqlparser.Subquery, currScope *sc
sco := currScope
for sco != nil {
for _, table := range sco.tables {
- tablesToKeep.MergeInPlace(table.getTableSet(b.org))
+ tablesToKeep = tablesToKeep.Merge(table.getTableSet(b.org))
}
sco = sco.parent
}
- subqDirectDeps.KeepOnly(tablesToKeep)
- subqRecursiveDeps.KeepOnly(tablesToKeep)
- b.recursive[subq] = subqRecursiveDeps
- b.direct[subq] = subqDirectDeps
+ b.recursive[subq] = subqRecursiveDeps.KeepOnly(tablesToKeep)
+ b.direct[subq] = subqDirectDeps.KeepOnly(tablesToKeep)
}
func (b *binder) createExtractedSubquery(cursor *sqlparser.Cursor, currScope *scope, subq *sqlparser.Subquery) (*sqlparser.ExtractedSubquery, error) {
@@ -262,7 +260,7 @@ func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allow
}
current = current.parent
}
- return dependency{}, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "symbol %s not found", sqlparser.String(colName))
+ return dependency{}, ShardedError{Inner: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "symbol %s not found", sqlparser.String(colName))}
}
func (b *binder) resolveColumnInScope(current *scope, expr *sqlparser.ColName, allowMulti bool) (dependencies, error) {
diff --git a/go/vt/vtgate/semantics/bitset/bitset.go b/go/vt/vtgate/semantics/bitset/bitset.go
new file mode 100644
index 00000000000..6bb1e2785aa
--- /dev/null
+++ b/go/vt/vtgate/semantics/bitset/bitset.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bitset
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+// A Bitset is an immutable collection of bits. You can perform logical operations
+// on it, but all mutable operations return a new Bitset.
+// It is safe to compare directly using the comparison operator and to use as a map key.
+type Bitset string
+
+const bitsetWidth = 8
+
+func bitsetWordSize(max int) int {
+ return max/bitsetWidth + 1
+}
+
+// toBitset converts a slice of bytes into a Bitset without allocating memory.
+// Bitset is defined with `string` as its underlying type, which is the only native type in Go that is dynamic _and_
+// immutable, so it can be used as a key in maps or compared directly.
+func toBitset(words []byte) Bitset {
+ if len(words) == 0 {
+ return ""
+ }
+ if words[len(words)-1] == 0 {
+ panic("toBitset: did not truncate")
+ }
+ // to convert a byte slice into a bitset without cloning the slice, we use the same trick as
+ // the Go standard library's `strings.Builder`. A slice header is [data, len, cap] while a
+ // string header is [data, len], hence the first two words of a slice header can be reinterpreted
+ // as a string header simply by casting into it.
+ // This assumes that the `words` slice will never be written to after returning from this function.
+ return *(*Bitset)(unsafe.Pointer(&words))
+}
+
+func minlen(a, b Bitset) int {
+ if len(a) < len(b) {
+ return len(a)
+ }
+ return len(b)
+}
+
+// Overlaps returns whether this Bitset and the input have any bits in common
+func (bs Bitset) Overlaps(b2 Bitset) bool {
+ min := minlen(bs, b2)
+ for i := 0; i < min; i++ {
+ if bs[i]&b2[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// Or returns the logical OR of the two Bitsets as a new Bitset
+func (bs Bitset) Or(b2 Bitset) Bitset {
+ if len(bs) == 0 {
+ return b2
+ }
+ if len(b2) == 0 {
+ return bs
+ }
+
+ small, large := bs, b2
+ if len(small) > len(large) {
+ small, large = large, small
+ }
+
+ merged := make([]byte, len(large))
+ m := 0
+
+ for m < len(small) {
+ merged[m] = small[m] | large[m]
+ m++
+ }
+ for m < len(large) {
+ merged[m] = large[m]
+ m++
+ }
+ return toBitset(merged)
+}
+
+// AndNot returns the logical AND NOT of the two Bitsets as a new Bitset
+func (bs Bitset) AndNot(b2 Bitset) Bitset {
+ if len(b2) == 0 {
+ return bs
+ }
+
+ merged := make([]byte, len(bs))
+ m := 0
+
+ for m = 0; m < len(bs); m++ {
+ if m < len(b2) {
+ merged[m] = bs[m] & ^b2[m]
+ } else {
+ merged[m] = bs[m]
+ }
+ }
+ for ; m > 0; m-- {
+ if merged[m-1] != 0 {
+ break
+ }
+ }
+ return toBitset(merged[:m])
+}
+
+// And returns the logical AND of the two bitsets as a new Bitset
+func (bs Bitset) And(b2 Bitset) Bitset {
+ if len(bs) == 0 || len(b2) == 0 {
+ return ""
+ }
+
+ merged := make([]byte, minlen(bs, b2))
+ m := 0
+
+ for m = 0; m < len(merged); m++ {
+ merged[m] = bs[m] & b2[m]
+ }
+ for ; m > 0; m-- {
+ if merged[m-1] != 0 {
+ break
+ }
+ }
+ return toBitset(merged[:m])
+}
+
+// Set returns a copy of this Bitset where the bit at `offset` is set
+func (bs Bitset) Set(offset int) Bitset {
+ alloc := len(bs)
+ if max := bitsetWordSize(offset); max > alloc {
+ alloc = max
+ }
+
+ words := make([]byte, alloc)
+ copy(words, bs)
+ words[offset/bitsetWidth] |= 1 << (offset % bitsetWidth)
+ return toBitset(words)
+}
+
+// SingleBit returns the position of the single bit that is set in this Bitset
+// If the Bitset is empty, or contains more than one set bit, it returns -1
+func (bs Bitset) SingleBit() int {
+ offset := -1
+ for i := 0; i < len(bs); i++ {
+ t := bs[i]
+ if t == 0 {
+ continue
+ }
+ if offset >= 0 || bits.OnesCount8(t) != 1 {
+ return -1
+ }
+ offset = i*bitsetWidth + bits.TrailingZeros8(t)
+ }
+ return offset
+}
+
+// IsContainedBy returns whether this Bitset is contained by the given Bitset
+func (bs Bitset) IsContainedBy(b2 Bitset) bool {
+ if len(bs) > len(b2) {
+ return false
+ }
+ for i := 0; i < len(bs); i++ {
+ left := bs[i]
+ rigt := b2[i]
+ if left&rigt != left {
+ return false
+ }
+ }
+ return true
+}
+
+// Popcount returns the number of bits that are set in this Bitset
+func (bs Bitset) Popcount() (count int) {
+ for i := 0; i < len(bs); i++ {
+ count += bits.OnesCount8(bs[i])
+ }
+ return
+}
+
+// ForEach calls the given callback with the position of each bit set in this Bitset
+func (bs Bitset) ForEach(yield func(int)) {
+ // From Lemire, "Iterating over set bits quickly"
+ // https://lemire.me/blog/2018/02/21/iterating-over-set-bits-quickly/
+ for i := 0; i < len(bs); i++ {
+ bitset := bs[i]
+ for bitset != 0 {
+ t := bitset & -bitset
+ r := bits.TrailingZeros8(bitset)
+ yield(i*bitsetWidth + r)
+ bitset ^= t
+ }
+ }
+}
+
+// Build creates a new immutable Bitset where all the given bits are set
+func Build(bits ...int) Bitset {
+ if len(bits) == 0 {
+ return ""
+ }
+
+ max := bits[0]
+ for _, b := range bits[1:] {
+ if b > max {
+ max = b
+ }
+ }
+
+ words := make([]byte, bitsetWordSize(max))
+ for _, b := range bits {
+ words[b/bitsetWidth] |= 1 << (b % bitsetWidth)
+ }
+ return toBitset(words)
+}
+
+const singleton = "\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00\x10\x00\x00\x00\x20\x00\x00\x00\x40\x00\x00\x00\x80"
+
+// Single returns a new Bitset where only the given bit is set.
+// If the given bit is less than 32, Single does not allocate to create a new Bitset.
+func Single(bit int) Bitset {
+ switch {
+ case bit < 8:
+ bit = (bit + 1) << 2
+ return Bitset(singleton[bit-1 : bit])
+ case bit < 16:
+ bit = (bit + 1 - 8) << 2
+ return Bitset(singleton[bit-2 : bit])
+ case bit < 24:
+ bit = (bit + 1 - 16) << 2
+ return Bitset(singleton[bit-3 : bit])
+ case bit < 32:
+ bit = (bit + 1 - 24) << 2
+ return Bitset(singleton[bit-4 : bit])
+ default:
+ words := make([]byte, bitsetWordSize(bit))
+ words[bit/bitsetWidth] |= 1 << (bit % bitsetWidth)
+ return toBitset(words)
+ }
+}
diff --git a/go/vt/servenv/purgelogs.go b/go/vt/vtgate/semantics/bitset/bitset_test.go
similarity index 56%
rename from go/vt/servenv/purgelogs.go
rename to go/vt/vtgate/semantics/bitset/bitset_test.go
index e5edc9e7be2..87bef299963 100644
--- a/go/vt/servenv/purgelogs.go
+++ b/go/vt/vtgate/semantics/bitset/bitset_test.go
@@ -1,5 +1,5 @@
/*
-Copyright 2019 The Vitess Authors.
+Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,15 +14,27 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package servenv
+package bitset
import (
- "vitess.io/vitess/go/vt/logutil"
+ "testing"
+
+ "github.com/stretchr/testify/require"
)
-func init() {
- OnInit(func() {
- go logutil.PurgeLogs()
- })
+func TestSingletons(t *testing.T) {
+ for i := 0; i < 40; i++ {
+ bs := Single(i)
+
+ require.Equal(t, 1, bs.Popcount())
+ require.Equal(t, i, bs.SingleBit())
+ var called bool
+ bs.ForEach(func(offset int) {
+ require.False(t, called)
+ require.Equal(t, i, offset)
+ called = true
+ })
+ require.True(t, called)
+ }
}
diff --git a/go/vt/vtgate/semantics/dependencies.go b/go/vt/vtgate/semantics/dependencies.go
index 197f2e95d8a..8e5a481e17d 100644
--- a/go/vt/vtgate/semantics/dependencies.go
+++ b/go/vt/vtgate/semantics/dependencies.go
@@ -113,8 +113,8 @@ func (c *certain) merge(d dependencies, allowMulti bool) dependencies {
if d.recursive == c.recursive {
return c
}
- c.direct.MergeInPlace(d.direct)
- c.recursive.MergeInPlace(d.recursive)
+ c.direct = c.direct.Merge(d.direct)
+ c.recursive = c.recursive.Merge(d.recursive)
if !allowMulti {
c.err = ambigousErr
}
diff --git a/go/vt/vtgate/semantics/derived_table.go b/go/vt/vtgate/semantics/derived_table.go
index 144d0b1d7f3..7379fa43f4e 100644
--- a/go/vt/vtgate/semantics/derived_table.go
+++ b/go/vt/vtgate/semantics/derived_table.go
@@ -17,6 +17,8 @@ limitations under the License.
package semantics
import (
+ "strings"
+
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
@@ -25,17 +27,18 @@ import (
// DerivedTable contains the information about the projection, tables involved in derived table.
type DerivedTable struct {
- tableName string
- ASTNode *sqlparser.AliasedTableExpr
- columnNames []string
- cols []sqlparser.Expr
- tables TableSet
+ tableName string
+ ASTNode *sqlparser.AliasedTableExpr
+ columnNames []string
+ cols []sqlparser.Expr
+ tables TableSet
+ isAuthoritative bool
}
var _ TableInfo = (*DerivedTable)(nil)
func createDerivedTableForExpressions(expressions sqlparser.SelectExprs, cols sqlparser.Columns, tables []TableInfo, org originable) *DerivedTable {
- vTbl := &DerivedTable{}
+ vTbl := &DerivedTable{isAuthoritative: true}
for i, selectExpr := range expressions {
switch expr := selectExpr.(type) {
case *sqlparser.AliasedExpr:
@@ -55,7 +58,10 @@ func createDerivedTableForExpressions(expressions sqlparser.SelectExprs, cols sq
}
case *sqlparser.StarExpr:
for _, table := range tables {
- vTbl.tables.MergeInPlace(table.getTableSet(org))
+ vTbl.tables = vTbl.tables.Merge(table.getTableSet(org))
+ if !table.authoritative() {
+ vTbl.isAuthoritative = false
+ }
}
}
}
@@ -66,7 +72,7 @@ func createDerivedTableForExpressions(expressions sqlparser.SelectExprs, cols sq
func (dt *DerivedTable) dependencies(colName string, org originable) (dependencies, error) {
directDeps := org.tableSetFor(dt.ASTNode)
for i, name := range dt.columnNames {
- if name != colName {
+ if !strings.EqualFold(name, colName) {
continue
}
_, recursiveDeps, qt := org.depsForExpr(dt.cols[i])
@@ -91,7 +97,7 @@ func (dt *DerivedTable) matches(name sqlparser.TableName) bool {
}
func (dt *DerivedTable) authoritative() bool {
- return true
+ return dt.isAuthoritative
}
// Name implements the TableInfo interface
@@ -119,7 +125,7 @@ func (dt *DerivedTable) getColumns() []ColumnInfo {
}
func (dt *DerivedTable) hasStar() bool {
- return dt.tables.NumberOfTables() > 0
+ return dt.tables.NonEmpty()
}
// GetTables implements the TableInfo interface
diff --git a/go/vt/vtgate/semantics/early_rewriter.go b/go/vt/vtgate/semantics/early_rewriter.go
index 6f006efd41e..32e67fd864c 100644
--- a/go/vt/vtgate/semantics/early_rewriter.go
+++ b/go/vt/vtgate/semantics/early_rewriter.go
@@ -18,6 +18,7 @@ package semantics
import (
"strconv"
+ "strings"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
@@ -52,7 +53,7 @@ func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error {
node.Join = sqlparser.NormalJoinType
r.warning = "straight join is converted to normal join"
}
- case *sqlparser.Order:
+ case sqlparser.OrderBy:
r.clause = "order clause"
rewriteHavingAndOrderBy(cursor, node)
case sqlparser.GroupBy:
@@ -109,6 +110,15 @@ func (r *earlyRewriter) expandStar(cursor *sqlparser.Cursor, node sqlparser.Sele
return nil
}
+// rewriteHavingAndOrderBy rewrites columns on the ORDER BY/HAVING
+// clauses to use aliases from the SELECT expressions when available.
+// The scoping rules are:
+// - A column identifier with no table qualifier that matches an alias introduced
+// in SELECT points to that expression, and not at any table column
+// - Except when expression aliased is an aggregation, and the column identifier in the
+// HAVING/ORDER BY clause is inside an aggregation function
+//
+// This is a rather unusual scoping rule, but it's what MySQL seems to do... ¯\_(ツ)_/¯
func rewriteHavingAndOrderBy(cursor *sqlparser.Cursor, node sqlparser.SQLNode) {
sel, isSel := cursor.Parent().(*sqlparser.Select)
if !isSel {
@@ -122,13 +132,31 @@ func rewriteHavingAndOrderBy(cursor *sqlparser.Cursor, node sqlparser.SQLNode) {
if !col.Qualifier.IsEmpty() {
return false
}
+ _, parentIsAggr := inner.Parent().(sqlparser.AggrFunc)
for _, e := range sel.SelectExprs {
ae, ok := e.(*sqlparser.AliasedExpr)
if !ok {
continue
}
if ae.As.Equal(col.Name) {
- inner.Replace(ae.Expr)
+ _, aliasPointsToAggr := ae.Expr.(sqlparser.AggrFunc)
+ if parentIsAggr && aliasPointsToAggr {
+ return false
+ }
+
+ safeToRewrite := true
+ sqlparser.Rewrite(ae.Expr, func(cursor *sqlparser.Cursor) bool {
+ switch cursor.Node().(type) {
+ case *sqlparser.ColName:
+ safeToRewrite = false
+ case sqlparser.AggrFunc:
+ return false
+ }
+ return true
+ }, nil)
+ if safeToRewrite {
+ inner.Replace(ae.Expr)
+ }
}
}
}
@@ -211,7 +239,7 @@ func rewriteJoinUsing(
usingCols = map[string]TableSet{}
}
for _, col := range tbl.getColumns() {
- _, found := usingCols[col.Name]
+ _, found := usingCols[strings.ToLower(col.Name)]
if found {
tblName, err := tbl.Name()
if err != nil {
@@ -313,7 +341,7 @@ func expandTableColumns(
ts, found := usingCols[col.Name]
if found {
for i, ts := range ts.Constituents() {
- if ts.Equals(currTable) {
+ if ts == currTable {
if i == 0 {
addColName(col)
} else {
diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go
index 9b40227af94..79f3eb7061c 100644
--- a/go/vt/vtgate/semantics/early_rewriter_test.go
+++ b/go/vt/vtgate/semantics/early_rewriter_test.go
@@ -145,10 +145,20 @@ func TestExpandStar(t *testing.T) {
expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b",
}, {
sql: "select * from t1 join t5 using (b) having b = 12",
- expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b having t1.b = 12",
+ expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b having b = 12",
}, {
sql: "select 1 from t1 join t5 using (b) having b = 12",
expSQL: "select 1 from t1 join t5 where t1.b = t5.b having t1.b = 12",
+ }, {
+ sql: "select * from (select 12) as t",
+ expSQL: "select t.`12` from (select 12 from dual) as t",
+ }, {
+ sql: "SELECT * FROM (SELECT *, 12 AS foo FROM t3) as results",
+ expSQL: "select * from (select *, 12 as foo from t3) as results",
+ }, {
+ // if we are only star-expanding authoritative tables, we don't need to stop the expansion
+ sql: "SELECT * FROM (SELECT t2.*, 12 AS foo FROM t3, t2) as results",
+ expSQL: "select results.c1, results.c2, results.foo from (select t2.c1 as c1, t2.c2 as c2, 12 as foo from t3, t2) as results",
}}
for _, tcase := range tcases {
t.Run(tcase.sql, func(t *testing.T) {
@@ -305,7 +315,7 @@ func TestOrderByGroupByLiteral(t *testing.T) {
}
}
-func TestHavingByColumnName(t *testing.T) {
+func TestHavingAndOrderByColumnName(t *testing.T) {
schemaInfo := &FakeSI{
Tables: map[string]*vindexes.Table{},
}
@@ -317,6 +327,12 @@ func TestHavingByColumnName(t *testing.T) {
}{{
sql: "select id, sum(foo) as sumOfFoo from t1 having sumOfFoo > 1",
expSQL: "select id, sum(foo) as sumOfFoo from t1 having sum(foo) > 1",
+ }, {
+ sql: "select id, sum(foo) as sumOfFoo from t1 order by sumOfFoo",
+ expSQL: "select id, sum(foo) as sumOfFoo from t1 order by sum(foo) asc",
+ }, {
+ sql: "select id, sum(foo) as foo from t1 having sum(foo) > 1",
+ expSQL: "select id, sum(foo) as foo from t1 having sum(foo) > 1",
}}
for _, tcase := range tcases {
t.Run(tcase.sql, func(t *testing.T) {
diff --git a/go/vt/vtgate/semantics/info_schema.go b/go/vt/vtgate/semantics/info_schema.go
new file mode 100644
index 00000000000..f834bb52f35
--- /dev/null
+++ b/go/vt/vtgate/semantics/info_schema.go
@@ -0,0 +1,1710 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semantics
+
+import (
+ "strings"
+
+ "vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/vt/key"
+ "vitess.io/vitess/go/vt/proto/query"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+)
+
+// createCol builds a vindexes.Column with the given column name, converting
+// the raw integer typ into a query.Type enum value. Callers below pass the
+// numeric enum values directly (e.g. 6165, 265) — presumably the querypb
+// Type constants emitted by the generator in info_schema_gen_test.go
+// referenced above; verify against the query.proto Type enum.
+func createCol(name string, typ int) vindexes.Column {
+	return vindexes.Column{Name: sqlparser.NewIdentifierCI(name), Type: query.Type(typ)}
+}
+
+// getInfoSchema57 returns a map of all information_schema tables and their columns with types
+// To recreate this information from MySQL, you can run the test in info_schema_gen_test.go
+func getInfoSchema57() map[string][]vindexes.Column {
+ infSchema := map[string][]vindexes.Column{}
+ var cols []vindexes.Column
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165))
+ cols = append(cols, createCol("DESCRIPTION", 6165))
+ cols = append(cols, createCol("MAXLEN", 265))
+ infSchema["CHARACTER_SETS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("ID", 265))
+ cols = append(cols, createCol("IS_DEFAULT", 6165))
+ cols = append(cols, createCol("IS_COMPILED", 6165))
+ cols = append(cols, createCol("SORTLEN", 265))
+ infSchema["COLLATIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["COLUMN_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("ORDINAL_POSITION", 265))
+ cols = append(cols, createCol("COLUMN_DEFAULT", 6163))
+ cols = append(cols, createCol("IS_NULLABLE", 6165))
+ cols = append(cols, createCol("DATA_TYPE", 6165))
+ cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265))
+ cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265))
+ cols = append(cols, createCol("NUMERIC_PRECISION", 265))
+ cols = append(cols, createCol("NUMERIC_SCALE", 265))
+ cols = append(cols, createCol("DATETIME_PRECISION", 265))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_TYPE", 6163))
+ cols = append(cols, createCol("COLUMN_KEY", 6165))
+ cols = append(cols, createCol("EXTRA", 6165))
+ cols = append(cols, createCol("PRIVILEGES", 6165))
+ cols = append(cols, createCol("COLUMN_COMMENT", 6165))
+ cols = append(cols, createCol("GENERATION_EXPRESSION", 6163))
+ infSchema["COLUMNS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("SUPPORT", 6165))
+ cols = append(cols, createCol("COMMENT", 6165))
+ cols = append(cols, createCol("TRANSACTIONS", 6165))
+ cols = append(cols, createCol("XA", 6165))
+ cols = append(cols, createCol("SAVEPOINTS", 6165))
+ infSchema["ENGINES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("EVENT_CATALOG", 6165))
+ cols = append(cols, createCol("EVENT_SCHEMA", 6165))
+ cols = append(cols, createCol("EVENT_NAME", 6165))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("TIME_ZONE", 6165))
+ cols = append(cols, createCol("EVENT_BODY", 6165))
+ cols = append(cols, createCol("EVENT_DEFINITION", 6163))
+ cols = append(cols, createCol("EVENT_TYPE", 6165))
+ cols = append(cols, createCol("EXECUTE_AT", 2064))
+ cols = append(cols, createCol("INTERVAL_VALUE", 6165))
+ cols = append(cols, createCol("INTERVAL_FIELD", 6165))
+ cols = append(cols, createCol("SQL_MODE", 6165))
+ cols = append(cols, createCol("STARTS", 2064))
+ cols = append(cols, createCol("ENDS", 2064))
+ cols = append(cols, createCol("STATUS", 6165))
+ cols = append(cols, createCol("ON_COMPLETION", 6165))
+ cols = append(cols, createCol("CREATED", 2064))
+ cols = append(cols, createCol("LAST_ALTERED", 2064))
+ cols = append(cols, createCol("LAST_EXECUTED", 2064))
+ cols = append(cols, createCol("EVENT_COMMENT", 6165))
+ cols = append(cols, createCol("ORIGINATOR", 265))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ cols = append(cols, createCol("DATABASE_COLLATION", 6165))
+ infSchema["EVENTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("FILE_ID", 265))
+ cols = append(cols, createCol("FILE_NAME", 6165))
+ cols = append(cols, createCol("FILE_TYPE", 6165))
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165))
+ cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265))
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("FULLTEXT_KEYS", 6165))
+ cols = append(cols, createCol("DELETED_ROWS", 265))
+ cols = append(cols, createCol("UPDATE_COUNT", 265))
+ cols = append(cols, createCol("FREE_EXTENTS", 265))
+ cols = append(cols, createCol("TOTAL_EXTENTS", 265))
+ cols = append(cols, createCol("EXTENT_SIZE", 265))
+ cols = append(cols, createCol("INITIAL_SIZE", 265))
+ cols = append(cols, createCol("MAXIMUM_SIZE", 265))
+ cols = append(cols, createCol("AUTOEXTEND_SIZE", 265))
+ cols = append(cols, createCol("CREATION_TIME", 2064))
+ cols = append(cols, createCol("LAST_UPDATE_TIME", 2064))
+ cols = append(cols, createCol("LAST_ACCESS_TIME", 2064))
+ cols = append(cols, createCol("RECOVER_TIME", 265))
+ cols = append(cols, createCol("TRANSACTION_COUNTER", 265))
+ cols = append(cols, createCol("VERSION", 265))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("TABLE_ROWS", 265))
+ cols = append(cols, createCol("AVG_ROW_LENGTH", 265))
+ cols = append(cols, createCol("DATA_LENGTH", 265))
+ cols = append(cols, createCol("MAX_DATA_LENGTH", 265))
+ cols = append(cols, createCol("INDEX_LENGTH", 265))
+ cols = append(cols, createCol("DATA_FREE", 265))
+ cols = append(cols, createCol("CREATE_TIME", 2064))
+ cols = append(cols, createCol("UPDATE_TIME", 2064))
+ cols = append(cols, createCol("CHECK_TIME", 2064))
+ cols = append(cols, createCol("CHECKSUM", 265))
+ cols = append(cols, createCol("STATUS", 6165))
+ cols = append(cols, createCol("EXTRA", 6165))
+ infSchema["FILES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("VARIABLE_NAME", 6165))
+ cols = append(cols, createCol("VARIABLE_VALUE", 6165))
+ infSchema["GLOBAL_STATUS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("VARIABLE_NAME", 6165))
+ cols = append(cols, createCol("VARIABLE_VALUE", 6165))
+ infSchema["GLOBAL_VARIABLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("POOL_ID", 265))
+ cols = append(cols, createCol("BLOCK_ID", 265))
+ cols = append(cols, createCol("SPACE", 265))
+ cols = append(cols, createCol("PAGE_NUMBER", 265))
+ cols = append(cols, createCol("PAGE_TYPE", 6165))
+ cols = append(cols, createCol("FLUSH_TYPE", 265))
+ cols = append(cols, createCol("FIX_COUNT", 265))
+ cols = append(cols, createCol("IS_HASHED", 6165))
+ cols = append(cols, createCol("NEWEST_MODIFICATION", 265))
+ cols = append(cols, createCol("OLDEST_MODIFICATION", 265))
+ cols = append(cols, createCol("ACCESS_TIME", 265))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("INDEX_NAME", 6165))
+ cols = append(cols, createCol("NUMBER_RECORDS", 265))
+ cols = append(cols, createCol("DATA_SIZE", 265))
+ cols = append(cols, createCol("COMPRESSED_SIZE", 265))
+ cols = append(cols, createCol("PAGE_STATE", 6165))
+ cols = append(cols, createCol("IO_FIX", 6165))
+ cols = append(cols, createCol("IS_OLD", 6165))
+ cols = append(cols, createCol("FREE_PAGE_CLOCK", 265))
+ infSchema["INNODB_BUFFER_PAGE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("POOL_ID", 265))
+ cols = append(cols, createCol("LRU_POSITION", 265))
+ cols = append(cols, createCol("SPACE", 265))
+ cols = append(cols, createCol("PAGE_NUMBER", 265))
+ cols = append(cols, createCol("PAGE_TYPE", 6165))
+ cols = append(cols, createCol("FLUSH_TYPE", 265))
+ cols = append(cols, createCol("FIX_COUNT", 265))
+ cols = append(cols, createCol("IS_HASHED", 6165))
+ cols = append(cols, createCol("NEWEST_MODIFICATION", 265))
+ cols = append(cols, createCol("OLDEST_MODIFICATION", 265))
+ cols = append(cols, createCol("ACCESS_TIME", 265))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("INDEX_NAME", 6165))
+ cols = append(cols, createCol("NUMBER_RECORDS", 265))
+ cols = append(cols, createCol("DATA_SIZE", 265))
+ cols = append(cols, createCol("COMPRESSED_SIZE", 265))
+ cols = append(cols, createCol("COMPRESSED", 6165))
+ cols = append(cols, createCol("IO_FIX", 6165))
+ cols = append(cols, createCol("IS_OLD", 6165))
+ cols = append(cols, createCol("FREE_PAGE_CLOCK", 265))
+ infSchema["INNODB_BUFFER_PAGE_LRU"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("POOL_ID", 265))
+ cols = append(cols, createCol("POOL_SIZE", 265))
+ cols = append(cols, createCol("FREE_BUFFERS", 265))
+ cols = append(cols, createCol("DATABASE_PAGES", 265))
+ cols = append(cols, createCol("OLD_DATABASE_PAGES", 265))
+ cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 265))
+ cols = append(cols, createCol("PENDING_DECOMPRESS", 265))
+ cols = append(cols, createCol("PENDING_READS", 265))
+ cols = append(cols, createCol("PENDING_FLUSH_LRU", 265))
+ cols = append(cols, createCol("PENDING_FLUSH_LIST", 265))
+ cols = append(cols, createCol("PAGES_MADE_YOUNG", 265))
+ cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 265))
+ cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1036))
+ cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1036))
+ cols = append(cols, createCol("NUMBER_PAGES_READ", 265))
+ cols = append(cols, createCol("NUMBER_PAGES_CREATED", 265))
+ cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 265))
+ cols = append(cols, createCol("PAGES_READ_RATE", 1036))
+ cols = append(cols, createCol("PAGES_CREATE_RATE", 1036))
+ cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1036))
+ cols = append(cols, createCol("NUMBER_PAGES_GET", 265))
+ cols = append(cols, createCol("HIT_RATE", 265))
+ cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 265))
+ cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265))
+ cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 265))
+ cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 265))
+ cols = append(cols, createCol("READ_AHEAD_RATE", 1036))
+ cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1036))
+ cols = append(cols, createCol("LRU_IO_TOTAL", 265))
+ cols = append(cols, createCol("LRU_IO_CURRENT", 265))
+ cols = append(cols, createCol("UNCOMPRESS_TOTAL", 265))
+ cols = append(cols, createCol("UNCOMPRESS_CURRENT", 265))
+ infSchema["INNODB_BUFFER_POOL_STATS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("database_name", 6165))
+ cols = append(cols, createCol("table_name", 6165))
+ cols = append(cols, createCol("index_name", 6165))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP_PER_INDEX"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("database_name", 6165))
+ cols = append(cols, createCol("table_name", 6165))
+ cols = append(cols, createCol("index_name", 6165))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP_RESET"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("buffer_pool_instance", 263))
+ cols = append(cols, createCol("pages_used", 263))
+ cols = append(cols, createCol("pages_free", 263))
+ cols = append(cols, createCol("relocation_ops", 265))
+ cols = append(cols, createCol("relocation_time", 263))
+ infSchema["INNODB_CMPMEM"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("buffer_pool_instance", 263))
+ cols = append(cols, createCol("pages_used", 263))
+ cols = append(cols, createCol("pages_free", 263))
+ cols = append(cols, createCol("relocation_ops", 265))
+ cols = append(cols, createCol("relocation_time", 263))
+ infSchema["INNODB_CMPMEM_RESET"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("DOC_ID", 265))
+ infSchema["INNODB_FT_BEING_DELETED"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("KEY", 6165))
+ cols = append(cols, createCol("VALUE", 6165))
+ infSchema["INNODB_FT_CONFIG"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("value", 6165))
+ infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("DOC_ID", 265))
+ infSchema["INNODB_FT_DELETED"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("WORD", 6165))
+ cols = append(cols, createCol("FIRST_DOC_ID", 265))
+ cols = append(cols, createCol("LAST_DOC_ID", 265))
+ cols = append(cols, createCol("DOC_COUNT", 265))
+ cols = append(cols, createCol("DOC_ID", 265))
+ cols = append(cols, createCol("POSITION", 265))
+ infSchema["INNODB_FT_INDEX_CACHE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("WORD", 6165))
+ cols = append(cols, createCol("FIRST_DOC_ID", 265))
+ cols = append(cols, createCol("LAST_DOC_ID", 265))
+ cols = append(cols, createCol("DOC_COUNT", 265))
+ cols = append(cols, createCol("DOC_ID", 265))
+ cols = append(cols, createCol("POSITION", 265))
+ infSchema["INNODB_FT_INDEX_TABLE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("requesting_trx_id", 6165))
+ cols = append(cols, createCol("requested_lock_id", 6165))
+ cols = append(cols, createCol("blocking_trx_id", 6165))
+ cols = append(cols, createCol("blocking_lock_id", 6165))
+ infSchema["INNODB_LOCK_WAITS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("lock_id", 6165))
+ cols = append(cols, createCol("lock_trx_id", 6165))
+ cols = append(cols, createCol("lock_mode", 6165))
+ cols = append(cols, createCol("lock_type", 6165))
+ cols = append(cols, createCol("lock_table", 6165))
+ cols = append(cols, createCol("lock_index", 6165))
+ cols = append(cols, createCol("lock_space", 265))
+ cols = append(cols, createCol("lock_page", 265))
+ cols = append(cols, createCol("lock_rec", 265))
+ cols = append(cols, createCol("lock_data", 6165))
+ infSchema["INNODB_LOCKS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("SUBSYSTEM", 6165))
+ cols = append(cols, createCol("COUNT", 265))
+ cols = append(cols, createCol("MAX_COUNT", 265))
+ cols = append(cols, createCol("MIN_COUNT", 265))
+ cols = append(cols, createCol("AVG_COUNT", 1036))
+ cols = append(cols, createCol("COUNT_RESET", 265))
+ cols = append(cols, createCol("MAX_COUNT_RESET", 265))
+ cols = append(cols, createCol("MIN_COUNT_RESET", 265))
+ cols = append(cols, createCol("AVG_COUNT_RESET", 1036))
+ cols = append(cols, createCol("TIME_ENABLED", 2064))
+ cols = append(cols, createCol("TIME_DISABLED", 2064))
+ cols = append(cols, createCol("TIME_ELAPSED", 265))
+ cols = append(cols, createCol("TIME_RESET", 2064))
+ cols = append(cols, createCol("STATUS", 6165))
+ cols = append(cols, createCol("TYPE", 6165))
+ cols = append(cols, createCol("COMMENT", 6165))
+ infSchema["INNODB_METRICS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 265))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("POS", 265))
+ cols = append(cols, createCol("MTYPE", 263))
+ cols = append(cols, createCol("PRTYPE", 263))
+ cols = append(cols, createCol("LEN", 263))
+ infSchema["INNODB_SYS_COLUMNS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPACE", 263))
+ cols = append(cols, createCol("PATH", 6165))
+ infSchema["INNODB_SYS_DATAFILES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("INDEX_ID", 265))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("POS", 263))
+ infSchema["INNODB_SYS_FIELDS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 6165))
+ cols = append(cols, createCol("FOR_NAME", 6165))
+ cols = append(cols, createCol("REF_NAME", 6165))
+ cols = append(cols, createCol("N_COLS", 263))
+ cols = append(cols, createCol("TYPE", 263))
+ infSchema["INNODB_SYS_FOREIGN"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 6165))
+ cols = append(cols, createCol("FOR_COL_NAME", 6165))
+ cols = append(cols, createCol("REF_COL_NAME", 6165))
+ cols = append(cols, createCol("POS", 263))
+ infSchema["INNODB_SYS_FOREIGN_COLS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("INDEX_ID", 265))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("TABLE_ID", 265))
+ cols = append(cols, createCol("TYPE", 263))
+ cols = append(cols, createCol("N_FIELDS", 263))
+ cols = append(cols, createCol("PAGE_NO", 263))
+ cols = append(cols, createCol("SPACE", 263))
+ cols = append(cols, createCol("MERGE_THRESHOLD", 263))
+ infSchema["INNODB_SYS_INDEXES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 265))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("FLAG", 263))
+ cols = append(cols, createCol("N_COLS", 263))
+ cols = append(cols, createCol("SPACE", 263))
+ cols = append(cols, createCol("FILE_FORMAT", 6165))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("ZIP_PAGE_SIZE", 263))
+ cols = append(cols, createCol("SPACE_TYPE", 6165))
+ infSchema["INNODB_SYS_TABLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPACE", 263))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("FLAG", 263))
+ cols = append(cols, createCol("FILE_FORMAT", 6165))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("PAGE_SIZE", 263))
+ cols = append(cols, createCol("ZIP_PAGE_SIZE", 263))
+ cols = append(cols, createCol("SPACE_TYPE", 6165))
+ cols = append(cols, createCol("FS_BLOCK_SIZE", 263))
+ cols = append(cols, createCol("FILE_SIZE", 265))
+ cols = append(cols, createCol("ALLOCATED_SIZE", 265))
+ infSchema["INNODB_SYS_TABLESPACES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 265))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("STATS_INITIALIZED", 6165))
+ cols = append(cols, createCol("NUM_ROWS", 265))
+ cols = append(cols, createCol("CLUST_INDEX_SIZE", 265))
+ cols = append(cols, createCol("OTHER_INDEX_SIZE", 265))
+ cols = append(cols, createCol("MODIFIED_COUNTER", 265))
+ cols = append(cols, createCol("AUTOINC", 265))
+ cols = append(cols, createCol("REF_COUNT", 263))
+ infSchema["INNODB_SYS_TABLESTATS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 265))
+ cols = append(cols, createCol("POS", 263))
+ cols = append(cols, createCol("BASE_POS", 263))
+ infSchema["INNODB_SYS_VIRTUAL"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 265))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("N_COLS", 263))
+ cols = append(cols, createCol("SPACE", 263))
+ cols = append(cols, createCol("PER_TABLE_TABLESPACE", 6165))
+ cols = append(cols, createCol("IS_COMPRESSED", 6165))
+ infSchema["INNODB_TEMP_TABLE_INFO"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("trx_id", 6165))
+ cols = append(cols, createCol("trx_state", 6165))
+ cols = append(cols, createCol("trx_started", 2064))
+ cols = append(cols, createCol("trx_requested_lock_id", 6165))
+ cols = append(cols, createCol("trx_wait_started", 2064))
+ cols = append(cols, createCol("trx_weight", 265))
+ cols = append(cols, createCol("trx_mysql_thread_id", 265))
+ cols = append(cols, createCol("trx_query", 6165))
+ cols = append(cols, createCol("trx_operation_state", 6165))
+ cols = append(cols, createCol("trx_tables_in_use", 265))
+ cols = append(cols, createCol("trx_tables_locked", 265))
+ cols = append(cols, createCol("trx_lock_structs", 265))
+ cols = append(cols, createCol("trx_lock_memory_bytes", 265))
+ cols = append(cols, createCol("trx_rows_locked", 265))
+ cols = append(cols, createCol("trx_rows_modified", 265))
+ cols = append(cols, createCol("trx_concurrency_tickets", 265))
+ cols = append(cols, createCol("trx_isolation_level", 6165))
+ cols = append(cols, createCol("trx_unique_checks", 263))
+ cols = append(cols, createCol("trx_foreign_key_checks", 263))
+ cols = append(cols, createCol("trx_last_foreign_key_error", 6165))
+ cols = append(cols, createCol("trx_adaptive_hash_latched", 263))
+ cols = append(cols, createCol("trx_adaptive_hash_timeout", 265))
+ cols = append(cols, createCol("trx_is_read_only", 263))
+ cols = append(cols, createCol("trx_autocommit_non_locking", 263))
+ infSchema["INNODB_TRX"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("ORDINAL_POSITION", 265))
+ cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 265))
+ cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165))
+ cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165))
+ infSchema["KEY_COLUMN_USAGE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("QUERY", 6163))
+ cols = append(cols, createCol("TRACE", 6163))
+ cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263))
+ cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257))
+ infSchema["OPTIMIZER_TRACE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPECIFIC_CATALOG", 6165))
+ cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165))
+ cols = append(cols, createCol("SPECIFIC_NAME", 6165))
+ cols = append(cols, createCol("ORDINAL_POSITION", 263))
+ cols = append(cols, createCol("PARAMETER_MODE", 6165))
+ cols = append(cols, createCol("PARAMETER_NAME", 6165))
+ cols = append(cols, createCol("DATA_TYPE", 6165))
+ cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263))
+ cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263))
+ cols = append(cols, createCol("NUMERIC_PRECISION", 265))
+ cols = append(cols, createCol("NUMERIC_SCALE", 263))
+ cols = append(cols, createCol("DATETIME_PRECISION", 265))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("DTD_IDENTIFIER", 6163))
+ cols = append(cols, createCol("ROUTINE_TYPE", 6165))
+ infSchema["PARAMETERS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("PARTITION_NAME", 6165))
+ cols = append(cols, createCol("SUBPARTITION_NAME", 6165))
+ cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 265))
+ cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 265))
+ cols = append(cols, createCol("PARTITION_METHOD", 6165))
+ cols = append(cols, createCol("SUBPARTITION_METHOD", 6165))
+ cols = append(cols, createCol("PARTITION_EXPRESSION", 6163))
+ cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6163))
+ cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163))
+ cols = append(cols, createCol("TABLE_ROWS", 265))
+ cols = append(cols, createCol("AVG_ROW_LENGTH", 265))
+ cols = append(cols, createCol("DATA_LENGTH", 265))
+ cols = append(cols, createCol("MAX_DATA_LENGTH", 265))
+ cols = append(cols, createCol("INDEX_LENGTH", 265))
+ cols = append(cols, createCol("DATA_FREE", 265))
+ cols = append(cols, createCol("CREATE_TIME", 2064))
+ cols = append(cols, createCol("UPDATE_TIME", 2064))
+ cols = append(cols, createCol("CHECK_TIME", 2064))
+ cols = append(cols, createCol("CHECKSUM", 265))
+ cols = append(cols, createCol("PARTITION_COMMENT", 6165))
+ cols = append(cols, createCol("NODEGROUP", 6165))
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ infSchema["PARTITIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("PLUGIN_NAME", 6165))
+ cols = append(cols, createCol("PLUGIN_VERSION", 6165))
+ cols = append(cols, createCol("PLUGIN_STATUS", 6165))
+ cols = append(cols, createCol("PLUGIN_TYPE", 6165))
+ cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165))
+ cols = append(cols, createCol("PLUGIN_LIBRARY", 6165))
+ cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165))
+ cols = append(cols, createCol("PLUGIN_AUTHOR", 6165))
+ cols = append(cols, createCol("PLUGIN_DESCRIPTION", 6163))
+ cols = append(cols, createCol("PLUGIN_LICENSE", 6165))
+ cols = append(cols, createCol("LOAD_OPTION", 6165))
+ infSchema["PLUGINS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 265))
+ cols = append(cols, createCol("USER", 6165))
+ cols = append(cols, createCol("HOST", 6165))
+ cols = append(cols, createCol("DB", 6165))
+ cols = append(cols, createCol("COMMAND", 6165))
+ cols = append(cols, createCol("TIME", 263))
+ cols = append(cols, createCol("STATE", 6165))
+ cols = append(cols, createCol("INFO", 6163))
+ infSchema["PROCESSLIST"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("QUERY_ID", 263))
+ cols = append(cols, createCol("SEQ", 263))
+ cols = append(cols, createCol("STATE", 6165))
+ cols = append(cols, createCol("DURATION", 18))
+ cols = append(cols, createCol("CPU_USER", 18))
+ cols = append(cols, createCol("CPU_SYSTEM", 18))
+ cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263))
+ cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263))
+ cols = append(cols, createCol("BLOCK_OPS_IN", 263))
+ cols = append(cols, createCol("BLOCK_OPS_OUT", 263))
+ cols = append(cols, createCol("MESSAGES_SENT", 263))
+ cols = append(cols, createCol("MESSAGES_RECEIVED", 263))
+ cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263))
+ cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263))
+ cols = append(cols, createCol("SWAPS", 263))
+ cols = append(cols, createCol("SOURCE_FUNCTION", 6165))
+ cols = append(cols, createCol("SOURCE_FILE", 6165))
+ cols = append(cols, createCol("SOURCE_LINE", 263))
+ infSchema["PROFILING"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("MATCH_OPTION", 6165))
+ cols = append(cols, createCol("UPDATE_RULE", 6165))
+ cols = append(cols, createCol("DELETE_RULE", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165))
+ infSchema["REFERENTIAL_CONSTRAINTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPECIFIC_NAME", 6165))
+ cols = append(cols, createCol("ROUTINE_CATALOG", 6165))
+ cols = append(cols, createCol("ROUTINE_SCHEMA", 6165))
+ cols = append(cols, createCol("ROUTINE_NAME", 6165))
+ cols = append(cols, createCol("ROUTINE_TYPE", 6165))
+ cols = append(cols, createCol("DATA_TYPE", 6165))
+ cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263))
+ cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263))
+ cols = append(cols, createCol("NUMERIC_PRECISION", 265))
+ cols = append(cols, createCol("NUMERIC_SCALE", 263))
+ cols = append(cols, createCol("DATETIME_PRECISION", 265))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("DTD_IDENTIFIER", 6163))
+ cols = append(cols, createCol("ROUTINE_BODY", 6165))
+ cols = append(cols, createCol("ROUTINE_DEFINITION", 6163))
+ cols = append(cols, createCol("EXTERNAL_NAME", 6165))
+ cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165))
+ cols = append(cols, createCol("PARAMETER_STYLE", 6165))
+ cols = append(cols, createCol("IS_DETERMINISTIC", 6165))
+ cols = append(cols, createCol("SQL_DATA_ACCESS", 6165))
+ cols = append(cols, createCol("SQL_PATH", 6165))
+ cols = append(cols, createCol("SECURITY_TYPE", 6165))
+ cols = append(cols, createCol("CREATED", 2064))
+ cols = append(cols, createCol("LAST_ALTERED", 2064))
+ cols = append(cols, createCol("SQL_MODE", 6165))
+ cols = append(cols, createCol("ROUTINE_COMMENT", 6163))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ cols = append(cols, createCol("DATABASE_COLLATION", 6165))
+ infSchema["ROUTINES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["SCHEMA_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CATALOG_NAME", 6165))
+ cols = append(cols, createCol("SCHEMA_NAME", 6165))
+ cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165))
+ cols = append(cols, createCol("SQL_PATH", 6165))
+ infSchema["SCHEMATA"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("VARIABLE_NAME", 6165))
+ cols = append(cols, createCol("VARIABLE_VALUE", 6165))
+ infSchema["SESSION_STATUS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("VARIABLE_NAME", 6165))
+ cols = append(cols, createCol("VARIABLE_VALUE", 6165))
+ infSchema["SESSION_VARIABLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("NON_UNIQUE", 265))
+ cols = append(cols, createCol("INDEX_SCHEMA", 6165))
+ cols = append(cols, createCol("INDEX_NAME", 6165))
+ cols = append(cols, createCol("SEQ_IN_INDEX", 265))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("COLLATION", 6165))
+ cols = append(cols, createCol("CARDINALITY", 265))
+ cols = append(cols, createCol("SUB_PART", 265))
+ cols = append(cols, createCol("PACKED", 6165))
+ cols = append(cols, createCol("NULLABLE", 6165))
+ cols = append(cols, createCol("INDEX_TYPE", 6165))
+ cols = append(cols, createCol("COMMENT", 6165))
+ cols = append(cols, createCol("INDEX_COMMENT", 6165))
+ infSchema["STATISTICS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("CONSTRAINT_TYPE", 6165))
+ infSchema["TABLE_CONSTRAINTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["TABLE_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("TABLE_TYPE", 6165))
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("VERSION", 265))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("TABLE_ROWS", 265))
+ cols = append(cols, createCol("AVG_ROW_LENGTH", 265))
+ cols = append(cols, createCol("DATA_LENGTH", 265))
+ cols = append(cols, createCol("MAX_DATA_LENGTH", 265))
+ cols = append(cols, createCol("INDEX_LENGTH", 265))
+ cols = append(cols, createCol("DATA_FREE", 265))
+ cols = append(cols, createCol("AUTO_INCREMENT", 265))
+ cols = append(cols, createCol("CREATE_TIME", 2064))
+ cols = append(cols, createCol("UPDATE_TIME", 2064))
+ cols = append(cols, createCol("CHECK_TIME", 2064))
+ cols = append(cols, createCol("TABLE_COLLATION", 6165))
+ cols = append(cols, createCol("CHECKSUM", 265))
+ cols = append(cols, createCol("CREATE_OPTIONS", 6165))
+ cols = append(cols, createCol("TABLE_COMMENT", 6165))
+ infSchema["TABLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("TABLESPACE_TYPE", 6165))
+ cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165))
+ cols = append(cols, createCol("EXTENT_SIZE", 265))
+ cols = append(cols, createCol("AUTOEXTEND_SIZE", 265))
+ cols = append(cols, createCol("MAXIMUM_SIZE", 265))
+ cols = append(cols, createCol("NODEGROUP_ID", 265))
+ cols = append(cols, createCol("TABLESPACE_COMMENT", 6165))
+ infSchema["TABLESPACES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TRIGGER_CATALOG", 6165))
+ cols = append(cols, createCol("TRIGGER_SCHEMA", 6165))
+ cols = append(cols, createCol("TRIGGER_NAME", 6165))
+ cols = append(cols, createCol("EVENT_MANIPULATION", 6165))
+ cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165))
+ cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165))
+ cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165))
+ cols = append(cols, createCol("ACTION_ORDER", 265))
+ cols = append(cols, createCol("ACTION_CONDITION", 6163))
+ cols = append(cols, createCol("ACTION_STATEMENT", 6163))
+ cols = append(cols, createCol("ACTION_ORIENTATION", 6165))
+ cols = append(cols, createCol("ACTION_TIMING", 6165))
+ cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 6165))
+ cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 6165))
+ cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165))
+ cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165))
+ cols = append(cols, createCol("CREATED", 2064))
+ cols = append(cols, createCol("SQL_MODE", 6165))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ cols = append(cols, createCol("DATABASE_COLLATION", 6165))
+ infSchema["TRIGGERS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["USER_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("VIEW_DEFINITION", 6163))
+ cols = append(cols, createCol("CHECK_OPTION", 6165))
+ cols = append(cols, createCol("IS_UPDATABLE", 6165))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("SECURITY_TYPE", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ infSchema["VIEWS"] = cols
+
+ return infSchema
+}
+
+// getInfoSchema80 returns a map of all information_schema tables in MySQL 8.0 and their columns with types.
+// To recreate this information from a running MySQL instance, run the test in info_schema_gen_test.go
+func getInfoSchema80() map[string][]vindexes.Column {
+ infSchema := map[string][]vindexes.Column{}
+ var cols []vindexes.Column
+ cols = append(cols, createCol("USER", 6165))
+ cols = append(cols, createCol("HOST", 6165))
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("GRANTEE_HOST", 6165))
+ cols = append(cols, createCol("ROLE_NAME", 6165))
+ cols = append(cols, createCol("ROLE_HOST", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ cols = append(cols, createCol("IS_DEFAULT", 6165))
+ cols = append(cols, createCol("IS_MANDATORY", 6165))
+ infSchema["ADMINISTRABLE_ROLE_AUTHORIZATIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("USER", 6165))
+ cols = append(cols, createCol("HOST", 6165))
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("GRANTEE_HOST", 6165))
+ cols = append(cols, createCol("ROLE_NAME", 6165))
+ cols = append(cols, createCol("ROLE_HOST", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ cols = append(cols, createCol("IS_DEFAULT", 6165))
+ cols = append(cols, createCol("IS_MANDATORY", 6165))
+ infSchema["APPLICABLE_ROLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165))
+ cols = append(cols, createCol("DESCRIPTION", 6165))
+ cols = append(cols, createCol("MAXLEN", 776))
+ infSchema["CHARACTER_SETS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("CHECK_CLAUSE", 6163))
+ infSchema["CHECK_CONSTRAINTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("ID", 778))
+ cols = append(cols, createCol("IS_DEFAULT", 6165))
+ cols = append(cols, createCol("IS_COMPILED", 6165))
+ cols = append(cols, createCol("SORTLEN", 776))
+ cols = append(cols, createCol("PAD_ATTRIBUTE", 2074))
+ infSchema["COLLATIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["COLUMN_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SCHEMA_NAME", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("HISTOGRAM", 2078))
+ infSchema["COLUMN_STATISTICS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("ORDINAL_POSITION", 776))
+ cols = append(cols, createCol("COLUMN_DEFAULT", 6163))
+ cols = append(cols, createCol("IS_NULLABLE", 6165))
+ cols = append(cols, createCol("DATA_TYPE", 6163))
+ cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265))
+ cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265))
+ cols = append(cols, createCol("NUMERIC_PRECISION", 778))
+ cols = append(cols, createCol("NUMERIC_SCALE", 778))
+ cols = append(cols, createCol("DATETIME_PRECISION", 776))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_TYPE", 6163))
+ cols = append(cols, createCol("COLUMN_KEY", 2074))
+ cols = append(cols, createCol("EXTRA", 6165))
+ cols = append(cols, createCol("PRIVILEGES", 6165))
+ cols = append(cols, createCol("COLUMN_COMMENT", 6163))
+ cols = append(cols, createCol("GENERATION_EXPRESSION", 6163))
+ cols = append(cols, createCol("SRS_ID", 776))
+ infSchema["COLUMNS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078))
+ cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078))
+ infSchema["COLUMNS_EXTENSIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ROLE_NAME", 6165))
+ cols = append(cols, createCol("ROLE_HOST", 6165))
+ cols = append(cols, createCol("IS_DEFAULT", 6165))
+ cols = append(cols, createCol("IS_MANDATORY", 6165))
+ infSchema["ENABLED_ROLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("SUPPORT", 6165))
+ cols = append(cols, createCol("COMMENT", 6165))
+ cols = append(cols, createCol("TRANSACTIONS", 6165))
+ cols = append(cols, createCol("XA", 6165))
+ cols = append(cols, createCol("SAVEPOINTS", 6165))
+ infSchema["ENGINES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("EVENT_CATALOG", 6165))
+ cols = append(cols, createCol("EVENT_SCHEMA", 6165))
+ cols = append(cols, createCol("EVENT_NAME", 6165))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("TIME_ZONE", 6165))
+ cols = append(cols, createCol("EVENT_BODY", 6165))
+ cols = append(cols, createCol("EVENT_DEFINITION", 6163))
+ cols = append(cols, createCol("EVENT_TYPE", 6165))
+ cols = append(cols, createCol("EXECUTE_AT", 2064))
+ cols = append(cols, createCol("INTERVAL_VALUE", 6165))
+ cols = append(cols, createCol("INTERVAL_FIELD", 2074))
+ cols = append(cols, createCol("SQL_MODE", 2075))
+ cols = append(cols, createCol("STARTS", 2064))
+ cols = append(cols, createCol("ENDS", 2064))
+ cols = append(cols, createCol("STATUS", 2074))
+ cols = append(cols, createCol("ON_COMPLETION", 6165))
+ cols = append(cols, createCol("CREATED", 2061))
+ cols = append(cols, createCol("LAST_ALTERED", 2061))
+ cols = append(cols, createCol("LAST_EXECUTED", 2064))
+ cols = append(cols, createCol("EVENT_COMMENT", 6165))
+ cols = append(cols, createCol("ORIGINATOR", 776))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ cols = append(cols, createCol("DATABASE_COLLATION", 6165))
+ infSchema["EVENTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("FILE_ID", 265))
+ cols = append(cols, createCol("FILE_NAME", 6163))
+ cols = append(cols, createCol("FILE_TYPE", 6165))
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6167))
+ cols = append(cols, createCol("TABLE_SCHEMA", 10264))
+ cols = append(cols, createCol("TABLE_NAME", 10264))
+ cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165))
+ cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265))
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("FULLTEXT_KEYS", 10264))
+ cols = append(cols, createCol("DELETED_ROWS", 10264))
+ cols = append(cols, createCol("UPDATE_COUNT", 10264))
+ cols = append(cols, createCol("FREE_EXTENTS", 265))
+ cols = append(cols, createCol("TOTAL_EXTENTS", 265))
+ cols = append(cols, createCol("EXTENT_SIZE", 265))
+ cols = append(cols, createCol("INITIAL_SIZE", 265))
+ cols = append(cols, createCol("MAXIMUM_SIZE", 265))
+ cols = append(cols, createCol("AUTOEXTEND_SIZE", 265))
+ cols = append(cols, createCol("CREATION_TIME", 10264))
+ cols = append(cols, createCol("LAST_UPDATE_TIME", 10264))
+ cols = append(cols, createCol("LAST_ACCESS_TIME", 10264))
+ cols = append(cols, createCol("RECOVER_TIME", 10264))
+ cols = append(cols, createCol("TRANSACTION_COUNTER", 10264))
+ cols = append(cols, createCol("VERSION", 265))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("TABLE_ROWS", 10264))
+ cols = append(cols, createCol("AVG_ROW_LENGTH", 10264))
+ cols = append(cols, createCol("DATA_LENGTH", 10264))
+ cols = append(cols, createCol("MAX_DATA_LENGTH", 10264))
+ cols = append(cols, createCol("INDEX_LENGTH", 10264))
+ cols = append(cols, createCol("DATA_FREE", 265))
+ cols = append(cols, createCol("CREATE_TIME", 10264))
+ cols = append(cols, createCol("UPDATE_TIME", 10264))
+ cols = append(cols, createCol("CHECK_TIME", 10264))
+ cols = append(cols, createCol("CHECKSUM", 10264))
+ cols = append(cols, createCol("STATUS", 6165))
+ cols = append(cols, createCol("EXTRA", 6165))
+ infSchema["FILES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("POOL_ID", 778))
+ cols = append(cols, createCol("BLOCK_ID", 778))
+ cols = append(cols, createCol("SPACE", 778))
+ cols = append(cols, createCol("PAGE_NUMBER", 778))
+ cols = append(cols, createCol("PAGE_TYPE", 6165))
+ cols = append(cols, createCol("FLUSH_TYPE", 778))
+ cols = append(cols, createCol("FIX_COUNT", 778))
+ cols = append(cols, createCol("IS_HASHED", 6165))
+ cols = append(cols, createCol("NEWEST_MODIFICATION", 778))
+ cols = append(cols, createCol("OLDEST_MODIFICATION", 778))
+ cols = append(cols, createCol("ACCESS_TIME", 778))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("INDEX_NAME", 6165))
+ cols = append(cols, createCol("NUMBER_RECORDS", 778))
+ cols = append(cols, createCol("DATA_SIZE", 778))
+ cols = append(cols, createCol("COMPRESSED_SIZE", 778))
+ cols = append(cols, createCol("PAGE_STATE", 6165))
+ cols = append(cols, createCol("IO_FIX", 6165))
+ cols = append(cols, createCol("IS_OLD", 6165))
+ cols = append(cols, createCol("FREE_PAGE_CLOCK", 778))
+ cols = append(cols, createCol("IS_STALE", 6165))
+ infSchema["INNODB_BUFFER_PAGE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("POOL_ID", 778))
+ cols = append(cols, createCol("LRU_POSITION", 778))
+ cols = append(cols, createCol("SPACE", 778))
+ cols = append(cols, createCol("PAGE_NUMBER", 778))
+ cols = append(cols, createCol("PAGE_TYPE", 6165))
+ cols = append(cols, createCol("FLUSH_TYPE", 778))
+ cols = append(cols, createCol("FIX_COUNT", 778))
+ cols = append(cols, createCol("IS_HASHED", 6165))
+ cols = append(cols, createCol("NEWEST_MODIFICATION", 778))
+ cols = append(cols, createCol("OLDEST_MODIFICATION", 778))
+ cols = append(cols, createCol("ACCESS_TIME", 778))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("INDEX_NAME", 6165))
+ cols = append(cols, createCol("NUMBER_RECORDS", 778))
+ cols = append(cols, createCol("DATA_SIZE", 778))
+ cols = append(cols, createCol("COMPRESSED_SIZE", 778))
+ cols = append(cols, createCol("COMPRESSED", 6165))
+ cols = append(cols, createCol("IO_FIX", 6165))
+ cols = append(cols, createCol("IS_OLD", 6165))
+ cols = append(cols, createCol("FREE_PAGE_CLOCK", 778))
+ infSchema["INNODB_BUFFER_PAGE_LRU"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("POOL_ID", 778))
+ cols = append(cols, createCol("POOL_SIZE", 778))
+ cols = append(cols, createCol("FREE_BUFFERS", 778))
+ cols = append(cols, createCol("DATABASE_PAGES", 778))
+ cols = append(cols, createCol("OLD_DATABASE_PAGES", 778))
+ cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 778))
+ cols = append(cols, createCol("PENDING_DECOMPRESS", 778))
+ cols = append(cols, createCol("PENDING_READS", 778))
+ cols = append(cols, createCol("PENDING_FLUSH_LRU", 778))
+ cols = append(cols, createCol("PENDING_FLUSH_LIST", 778))
+ cols = append(cols, createCol("PAGES_MADE_YOUNG", 778))
+ cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 778))
+ cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1035))
+ cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1035))
+ cols = append(cols, createCol("NUMBER_PAGES_READ", 778))
+ cols = append(cols, createCol("NUMBER_PAGES_CREATED", 778))
+ cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 778))
+ cols = append(cols, createCol("PAGES_READ_RATE", 1035))
+ cols = append(cols, createCol("PAGES_CREATE_RATE", 1035))
+ cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1035))
+ cols = append(cols, createCol("NUMBER_PAGES_GET", 778))
+ cols = append(cols, createCol("HIT_RATE", 778))
+ cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 778))
+ cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778))
+ cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 778))
+ cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 778))
+ cols = append(cols, createCol("READ_AHEAD_RATE", 1035))
+ cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1035))
+ cols = append(cols, createCol("LRU_IO_TOTAL", 778))
+ cols = append(cols, createCol("LRU_IO_CURRENT", 778))
+ cols = append(cols, createCol("UNCOMPRESS_TOTAL", 778))
+ cols = append(cols, createCol("UNCOMPRESS_CURRENT", 778))
+ infSchema["INNODB_BUFFER_POOL_STATS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPACE_ID", 776))
+ cols = append(cols, createCol("INDEX_ID", 778))
+ cols = append(cols, createCol("N_CACHED_PAGES", 778))
+ infSchema["INNODB_CACHED_INDEXES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("database_name", 6165))
+ cols = append(cols, createCol("table_name", 6165))
+ cols = append(cols, createCol("index_name", 6165))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP_PER_INDEX"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("database_name", 6165))
+ cols = append(cols, createCol("table_name", 6165))
+ cols = append(cols, createCol("index_name", 6165))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("compress_ops", 263))
+ cols = append(cols, createCol("compress_ops_ok", 263))
+ cols = append(cols, createCol("compress_time", 263))
+ cols = append(cols, createCol("uncompress_ops", 263))
+ cols = append(cols, createCol("uncompress_time", 263))
+ infSchema["INNODB_CMP_RESET"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("buffer_pool_instance", 263))
+ cols = append(cols, createCol("pages_used", 263))
+ cols = append(cols, createCol("pages_free", 263))
+ cols = append(cols, createCol("relocation_ops", 265))
+ cols = append(cols, createCol("relocation_time", 263))
+ infSchema["INNODB_CMPMEM"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("page_size", 263))
+ cols = append(cols, createCol("buffer_pool_instance", 263))
+ cols = append(cols, createCol("pages_used", 263))
+ cols = append(cols, createCol("pages_free", 263))
+ cols = append(cols, createCol("relocation_ops", 265))
+ cols = append(cols, createCol("relocation_time", 263))
+ infSchema["INNODB_CMPMEM_RESET"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 778))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("POS", 778))
+ cols = append(cols, createCol("MTYPE", 263))
+ cols = append(cols, createCol("PRTYPE", 263))
+ cols = append(cols, createCol("LEN", 263))
+ cols = append(cols, createCol("HAS_DEFAULT", 263))
+ cols = append(cols, createCol("DEFAULT_VALUE", 6163))
+ infSchema["INNODB_COLUMNS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPACE", 10262))
+ cols = append(cols, createCol("PATH", 6165))
+ infSchema["INNODB_DATAFILES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("INDEX_ID", 10262))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("POS", 778))
+ infSchema["INNODB_FIELDS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 6165))
+ cols = append(cols, createCol("FOR_NAME", 6165))
+ cols = append(cols, createCol("REF_NAME", 6165))
+ cols = append(cols, createCol("N_COLS", 265))
+ cols = append(cols, createCol("TYPE", 778))
+ infSchema["INNODB_FOREIGN"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 6165))
+ cols = append(cols, createCol("FOR_COL_NAME", 6165))
+ cols = append(cols, createCol("REF_COL_NAME", 6165))
+ cols = append(cols, createCol("POS", 776))
+ infSchema["INNODB_FOREIGN_COLS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("DOC_ID", 778))
+ infSchema["INNODB_FT_BEING_DELETED"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("KEY", 6165))
+ cols = append(cols, createCol("VALUE", 6165))
+ infSchema["INNODB_FT_CONFIG"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("value", 6165))
+ infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("DOC_ID", 778))
+ infSchema["INNODB_FT_DELETED"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("WORD", 6165))
+ cols = append(cols, createCol("FIRST_DOC_ID", 778))
+ cols = append(cols, createCol("LAST_DOC_ID", 778))
+ cols = append(cols, createCol("DOC_COUNT", 778))
+ cols = append(cols, createCol("DOC_ID", 778))
+ cols = append(cols, createCol("POSITION", 778))
+ infSchema["INNODB_FT_INDEX_CACHE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("WORD", 6165))
+ cols = append(cols, createCol("FIRST_DOC_ID", 778))
+ cols = append(cols, createCol("LAST_DOC_ID", 778))
+ cols = append(cols, createCol("DOC_COUNT", 778))
+ cols = append(cols, createCol("DOC_ID", 778))
+ cols = append(cols, createCol("POSITION", 778))
+ infSchema["INNODB_FT_INDEX_TABLE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("INDEX_ID", 778))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("TABLE_ID", 778))
+ cols = append(cols, createCol("TYPE", 263))
+ cols = append(cols, createCol("N_FIELDS", 263))
+ cols = append(cols, createCol("PAGE_NO", 263))
+ cols = append(cols, createCol("SPACE", 263))
+ cols = append(cols, createCol("MERGE_THRESHOLD", 263))
+ infSchema["INNODB_INDEXES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("SUBSYSTEM", 6165))
+ cols = append(cols, createCol("COUNT", 265))
+ cols = append(cols, createCol("MAX_COUNT", 265))
+ cols = append(cols, createCol("MIN_COUNT", 265))
+ cols = append(cols, createCol("AVG_COUNT", 1035))
+ cols = append(cols, createCol("COUNT_RESET", 265))
+ cols = append(cols, createCol("MAX_COUNT_RESET", 265))
+ cols = append(cols, createCol("MIN_COUNT_RESET", 265))
+ cols = append(cols, createCol("AVG_COUNT_RESET", 1035))
+ cols = append(cols, createCol("TIME_ENABLED", 2064))
+ cols = append(cols, createCol("TIME_DISABLED", 2064))
+ cols = append(cols, createCol("TIME_ELAPSED", 265))
+ cols = append(cols, createCol("TIME_RESET", 2064))
+ cols = append(cols, createCol("STATUS", 6165))
+ cols = append(cols, createCol("TYPE", 6165))
+ cols = append(cols, createCol("COMMENT", 6165))
+ infSchema["INNODB_METRICS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 776))
+ cols = append(cols, createCol("SPACE", 776))
+ cols = append(cols, createCol("PATH", 6165))
+ cols = append(cols, createCol("SIZE", 778))
+ cols = append(cols, createCol("STATE", 6165))
+ cols = append(cols, createCol("PURPOSE", 6165))
+ infSchema["INNODB_SESSION_TEMP_TABLESPACES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 778))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("FLAG", 263))
+ cols = append(cols, createCol("N_COLS", 263))
+ cols = append(cols, createCol("SPACE", 265))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("ZIP_PAGE_SIZE", 776))
+ cols = append(cols, createCol("SPACE_TYPE", 6165))
+ cols = append(cols, createCol("INSTANT_COLS", 263))
+ infSchema["INNODB_TABLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPACE", 776))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("FLAG", 776))
+ cols = append(cols, createCol("ROW_FORMAT", 6165))
+ cols = append(cols, createCol("PAGE_SIZE", 776))
+ cols = append(cols, createCol("ZIP_PAGE_SIZE", 776))
+ cols = append(cols, createCol("SPACE_TYPE", 6165))
+ cols = append(cols, createCol("FS_BLOCK_SIZE", 776))
+ cols = append(cols, createCol("FILE_SIZE", 778))
+ cols = append(cols, createCol("ALLOCATED_SIZE", 778))
+ cols = append(cols, createCol("AUTOEXTEND_SIZE", 778))
+ cols = append(cols, createCol("SERVER_VERSION", 6165))
+ cols = append(cols, createCol("SPACE_VERSION", 776))
+ cols = append(cols, createCol("ENCRYPTION", 6165))
+ cols = append(cols, createCol("STATE", 6165))
+ infSchema["INNODB_TABLESPACES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPACE", 10262))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("PATH", 6165))
+ cols = append(cols, createCol("FLAG", 10262))
+ cols = append(cols, createCol("SPACE_TYPE", 6165))
+ infSchema["INNODB_TABLESPACES_BRIEF"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 778))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("STATS_INITIALIZED", 6165))
+ cols = append(cols, createCol("NUM_ROWS", 778))
+ cols = append(cols, createCol("CLUST_INDEX_SIZE", 778))
+ cols = append(cols, createCol("OTHER_INDEX_SIZE", 778))
+ cols = append(cols, createCol("MODIFIED_COUNTER", 778))
+ cols = append(cols, createCol("AUTOINC", 778))
+ cols = append(cols, createCol("REF_COUNT", 263))
+ infSchema["INNODB_TABLESTATS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 778))
+ cols = append(cols, createCol("NAME", 6165))
+ cols = append(cols, createCol("N_COLS", 776))
+ cols = append(cols, createCol("SPACE", 776))
+ infSchema["INNODB_TEMP_TABLE_INFO"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("trx_id", 778))
+ cols = append(cols, createCol("trx_state", 6165))
+ cols = append(cols, createCol("trx_started", 2064))
+ cols = append(cols, createCol("trx_requested_lock_id", 6165))
+ cols = append(cols, createCol("trx_wait_started", 2064))
+ cols = append(cols, createCol("trx_weight", 778))
+ cols = append(cols, createCol("trx_mysql_thread_id", 778))
+ cols = append(cols, createCol("trx_query", 6165))
+ cols = append(cols, createCol("trx_operation_state", 6165))
+ cols = append(cols, createCol("trx_tables_in_use", 778))
+ cols = append(cols, createCol("trx_tables_locked", 778))
+ cols = append(cols, createCol("trx_lock_structs", 778))
+ cols = append(cols, createCol("trx_lock_memory_bytes", 778))
+ cols = append(cols, createCol("trx_rows_locked", 778))
+ cols = append(cols, createCol("trx_rows_modified", 778))
+ cols = append(cols, createCol("trx_concurrency_tickets", 778))
+ cols = append(cols, createCol("trx_isolation_level", 6165))
+ cols = append(cols, createCol("trx_unique_checks", 263))
+ cols = append(cols, createCol("trx_foreign_key_checks", 263))
+ cols = append(cols, createCol("trx_last_foreign_key_error", 6165))
+ cols = append(cols, createCol("trx_adaptive_hash_latched", 263))
+ cols = append(cols, createCol("trx_adaptive_hash_timeout", 778))
+ cols = append(cols, createCol("trx_is_read_only", 263))
+ cols = append(cols, createCol("trx_autocommit_non_locking", 263))
+ cols = append(cols, createCol("trx_schedule_weight", 778))
+ infSchema["INNODB_TRX"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_ID", 778))
+ cols = append(cols, createCol("POS", 776))
+ cols = append(cols, createCol("BASE_POS", 776))
+ infSchema["INNODB_VIRTUAL"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("ORDINAL_POSITION", 776))
+ cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 776))
+ cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165))
+ cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165))
+ infSchema["KEY_COLUMN_USAGE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("WORD", 6165))
+ cols = append(cols, createCol("RESERVED", 263))
+ infSchema["KEYWORDS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("QUERY", 6165))
+ cols = append(cols, createCol("TRACE", 6165))
+ cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263))
+ cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257))
+ infSchema["OPTIMIZER_TRACE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPECIFIC_CATALOG", 6165))
+ cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165))
+ cols = append(cols, createCol("SPECIFIC_NAME", 6165))
+ cols = append(cols, createCol("ORDINAL_POSITION", 778))
+ cols = append(cols, createCol("PARAMETER_MODE", 6165))
+ cols = append(cols, createCol("PARAMETER_NAME", 6165))
+ cols = append(cols, createCol("DATA_TYPE", 6163))
+ cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265))
+ cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265))
+ cols = append(cols, createCol("NUMERIC_PRECISION", 776))
+ cols = append(cols, createCol("NUMERIC_SCALE", 265))
+ cols = append(cols, createCol("DATETIME_PRECISION", 776))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("DTD_IDENTIFIER", 6163))
+ cols = append(cols, createCol("ROUTINE_TYPE", 2074))
+ infSchema["PARAMETERS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("PARTITION_NAME", 6165))
+ cols = append(cols, createCol("SUBPARTITION_NAME", 6165))
+ cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 776))
+ cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 776))
+ cols = append(cols, createCol("PARTITION_METHOD", 6165))
+ cols = append(cols, createCol("SUBPARTITION_METHOD", 6165))
+ cols = append(cols, createCol("PARTITION_EXPRESSION", 6165))
+ cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6165))
+ cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163))
+ cols = append(cols, createCol("TABLE_ROWS", 778))
+ cols = append(cols, createCol("AVG_ROW_LENGTH", 778))
+ cols = append(cols, createCol("DATA_LENGTH", 778))
+ cols = append(cols, createCol("MAX_DATA_LENGTH", 778))
+ cols = append(cols, createCol("INDEX_LENGTH", 778))
+ cols = append(cols, createCol("DATA_FREE", 778))
+ cols = append(cols, createCol("CREATE_TIME", 2061))
+ cols = append(cols, createCol("UPDATE_TIME", 2064))
+ cols = append(cols, createCol("CHECK_TIME", 2064))
+ cols = append(cols, createCol("CHECKSUM", 265))
+ cols = append(cols, createCol("PARTITION_COMMENT", 6163))
+ cols = append(cols, createCol("NODEGROUP", 6165))
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ infSchema["PARTITIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("PLUGIN_NAME", 6165))
+ cols = append(cols, createCol("PLUGIN_VERSION", 6165))
+ cols = append(cols, createCol("PLUGIN_STATUS", 6165))
+ cols = append(cols, createCol("PLUGIN_TYPE", 6165))
+ cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165))
+ cols = append(cols, createCol("PLUGIN_LIBRARY", 6165))
+ cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165))
+ cols = append(cols, createCol("PLUGIN_AUTHOR", 6165))
+ cols = append(cols, createCol("PLUGIN_DESCRIPTION", 6165))
+ cols = append(cols, createCol("PLUGIN_LICENSE", 6165))
+ cols = append(cols, createCol("LOAD_OPTION", 6165))
+ infSchema["PLUGINS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("ID", 778))
+ cols = append(cols, createCol("USER", 6165))
+ cols = append(cols, createCol("HOST", 6165))
+ cols = append(cols, createCol("DB", 6165))
+ cols = append(cols, createCol("COMMAND", 6165))
+ cols = append(cols, createCol("TIME", 263))
+ cols = append(cols, createCol("STATE", 6165))
+ cols = append(cols, createCol("INFO", 6165))
+ infSchema["PROCESSLIST"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("QUERY_ID", 263))
+ cols = append(cols, createCol("SEQ", 263))
+ cols = append(cols, createCol("STATE", 6165))
+ cols = append(cols, createCol("DURATION", 18))
+ cols = append(cols, createCol("CPU_USER", 18))
+ cols = append(cols, createCol("CPU_SYSTEM", 18))
+ cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263))
+ cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263))
+ cols = append(cols, createCol("BLOCK_OPS_IN", 263))
+ cols = append(cols, createCol("BLOCK_OPS_OUT", 263))
+ cols = append(cols, createCol("MESSAGES_SENT", 263))
+ cols = append(cols, createCol("MESSAGES_RECEIVED", 263))
+ cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263))
+ cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263))
+ cols = append(cols, createCol("SWAPS", 263))
+ cols = append(cols, createCol("SOURCE_FUNCTION", 6165))
+ cols = append(cols, createCol("SOURCE_FILE", 6165))
+ cols = append(cols, createCol("SOURCE_LINE", 263))
+ infSchema["PROFILING"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("MATCH_OPTION", 2074))
+ cols = append(cols, createCol("UPDATE_RULE", 2074))
+ cols = append(cols, createCol("DELETE_RULE", 2074))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165))
+ infSchema["REFERENTIAL_CONSTRAINTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("RESOURCE_GROUP_NAME", 6165))
+ cols = append(cols, createCol("RESOURCE_GROUP_TYPE", 2074))
+ cols = append(cols, createCol("RESOURCE_GROUP_ENABLED", 257))
+ cols = append(cols, createCol("VCPU_IDS", 10260))
+ cols = append(cols, createCol("THREAD_PRIORITY", 263))
+ infSchema["RESOURCE_GROUPS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTOR", 6165))
+ cols = append(cols, createCol("GRANTOR_HOST", 6165))
+ cols = append(cols, createCol("GRANTEE", 6167))
+ cols = append(cols, createCol("GRANTEE_HOST", 6167))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6167))
+ cols = append(cols, createCol("TABLE_NAME", 6167))
+ cols = append(cols, createCol("COLUMN_NAME", 6167))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 2075))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["ROLE_COLUMN_GRANTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTOR", 6165))
+ cols = append(cols, createCol("GRANTOR_HOST", 6165))
+ cols = append(cols, createCol("GRANTEE", 6167))
+ cols = append(cols, createCol("GRANTEE_HOST", 6167))
+ cols = append(cols, createCol("SPECIFIC_CATALOG", 6165))
+ cols = append(cols, createCol("SPECIFIC_SCHEMA", 6167))
+ cols = append(cols, createCol("SPECIFIC_NAME", 6167))
+ cols = append(cols, createCol("ROUTINE_CATALOG", 6165))
+ cols = append(cols, createCol("ROUTINE_SCHEMA", 6167))
+ cols = append(cols, createCol("ROUTINE_NAME", 6167))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 2075))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["ROLE_ROUTINE_GRANTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTOR", 6165))
+ cols = append(cols, createCol("GRANTOR_HOST", 6165))
+ cols = append(cols, createCol("GRANTEE", 6167))
+ cols = append(cols, createCol("GRANTEE_HOST", 6167))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6167))
+ cols = append(cols, createCol("TABLE_NAME", 6167))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 2075))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["ROLE_TABLE_GRANTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SPECIFIC_NAME", 6165))
+ cols = append(cols, createCol("ROUTINE_CATALOG", 6165))
+ cols = append(cols, createCol("ROUTINE_SCHEMA", 6165))
+ cols = append(cols, createCol("ROUTINE_NAME", 6165))
+ cols = append(cols, createCol("ROUTINE_TYPE", 2074))
+ cols = append(cols, createCol("DATA_TYPE", 6163))
+ cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265))
+ cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265))
+ cols = append(cols, createCol("NUMERIC_PRECISION", 776))
+ cols = append(cols, createCol("NUMERIC_SCALE", 776))
+ cols = append(cols, createCol("DATETIME_PRECISION", 776))
+ cols = append(cols, createCol("CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("COLLATION_NAME", 6165))
+ cols = append(cols, createCol("DTD_IDENTIFIER", 6163))
+ cols = append(cols, createCol("ROUTINE_BODY", 6165))
+ cols = append(cols, createCol("ROUTINE_DEFINITION", 6163))
+ cols = append(cols, createCol("EXTERNAL_NAME", 10264))
+ cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165))
+ cols = append(cols, createCol("PARAMETER_STYLE", 6165))
+ cols = append(cols, createCol("IS_DETERMINISTIC", 6165))
+ cols = append(cols, createCol("SQL_DATA_ACCESS", 2074))
+ cols = append(cols, createCol("SQL_PATH", 10264))
+ cols = append(cols, createCol("SECURITY_TYPE", 2074))
+ cols = append(cols, createCol("CREATED", 2061))
+ cols = append(cols, createCol("LAST_ALTERED", 2061))
+ cols = append(cols, createCol("SQL_MODE", 2075))
+ cols = append(cols, createCol("ROUTINE_COMMENT", 6163))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ cols = append(cols, createCol("DATABASE_COLLATION", 6165))
+ infSchema["ROUTINES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["SCHEMA_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CATALOG_NAME", 6165))
+ cols = append(cols, createCol("SCHEMA_NAME", 6165))
+ cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165))
+ cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165))
+ cols = append(cols, createCol("SQL_PATH", 10264))
+ cols = append(cols, createCol("DEFAULT_ENCRYPTION", 2074))
+ infSchema["SCHEMATA"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CATALOG_NAME", 6165))
+ cols = append(cols, createCol("SCHEMA_NAME", 6165))
+ cols = append(cols, createCol("OPTIONS", 6165))
+ infSchema["SCHEMATA_EXTENSIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("SRS_NAME", 6165))
+ cols = append(cols, createCol("SRS_ID", 776))
+ cols = append(cols, createCol("GEOMETRY_TYPE_NAME", 6163))
+ infSchema["ST_GEOMETRY_COLUMNS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("SRS_NAME", 6165))
+ cols = append(cols, createCol("SRS_ID", 776))
+ cols = append(cols, createCol("ORGANIZATION", 6165))
+ cols = append(cols, createCol("ORGANIZATION_COORDSYS_ID", 776))
+ cols = append(cols, createCol("DEFINITION", 6165))
+ cols = append(cols, createCol("DESCRIPTION", 6165))
+ infSchema["ST_SPATIAL_REFERENCE_SYSTEMS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("UNIT_NAME", 6165))
+ cols = append(cols, createCol("UNIT_TYPE", 6165))
+ cols = append(cols, createCol("CONVERSION_FACTOR", 1036))
+ cols = append(cols, createCol("DESCRIPTION", 6165))
+ infSchema["ST_UNITS_OF_MEASURE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("NON_UNIQUE", 263))
+ cols = append(cols, createCol("INDEX_SCHEMA", 6165))
+ cols = append(cols, createCol("INDEX_NAME", 6165))
+ cols = append(cols, createCol("SEQ_IN_INDEX", 776))
+ cols = append(cols, createCol("COLUMN_NAME", 6165))
+ cols = append(cols, createCol("COLLATION", 6165))
+ cols = append(cols, createCol("CARDINALITY", 265))
+ cols = append(cols, createCol("SUB_PART", 265))
+ cols = append(cols, createCol("PACKED", 10264))
+ cols = append(cols, createCol("NULLABLE", 6165))
+ cols = append(cols, createCol("INDEX_TYPE", 6165))
+ cols = append(cols, createCol("COMMENT", 6165))
+ cols = append(cols, createCol("INDEX_COMMENT", 6165))
+ cols = append(cols, createCol("IS_VISIBLE", 6165))
+ cols = append(cols, createCol("EXPRESSION", 6163))
+ infSchema["STATISTICS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("CONSTRAINT_TYPE", 6165))
+ cols = append(cols, createCol("ENFORCED", 6165))
+ infSchema["TABLE_CONSTRAINTS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165))
+ cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165))
+ cols = append(cols, createCol("CONSTRAINT_NAME", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078))
+ cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078))
+ infSchema["TABLE_CONSTRAINTS_EXTENSIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["TABLE_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("TABLE_TYPE", 2074))
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("VERSION", 263))
+ cols = append(cols, createCol("ROW_FORMAT", 2074))
+ cols = append(cols, createCol("TABLE_ROWS", 778))
+ cols = append(cols, createCol("AVG_ROW_LENGTH", 778))
+ cols = append(cols, createCol("DATA_LENGTH", 778))
+ cols = append(cols, createCol("MAX_DATA_LENGTH", 778))
+ cols = append(cols, createCol("INDEX_LENGTH", 778))
+ cols = append(cols, createCol("DATA_FREE", 778))
+ cols = append(cols, createCol("AUTO_INCREMENT", 778))
+ cols = append(cols, createCol("CREATE_TIME", 2061))
+ cols = append(cols, createCol("UPDATE_TIME", 2064))
+ cols = append(cols, createCol("CHECK_TIME", 2064))
+ cols = append(cols, createCol("TABLE_COLLATION", 6165))
+ cols = append(cols, createCol("CHECKSUM", 265))
+ cols = append(cols, createCol("CREATE_OPTIONS", 6165))
+ cols = append(cols, createCol("TABLE_COMMENT", 6163))
+ infSchema["TABLES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078))
+ cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078))
+ infSchema["TABLES_EXTENSIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ cols = append(cols, createCol("ENGINE", 6165))
+ cols = append(cols, createCol("TABLESPACE_TYPE", 6165))
+ cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165))
+ cols = append(cols, createCol("EXTENT_SIZE", 778))
+ cols = append(cols, createCol("AUTOEXTEND_SIZE", 778))
+ cols = append(cols, createCol("MAXIMUM_SIZE", 778))
+ cols = append(cols, createCol("NODEGROUP_ID", 778))
+ cols = append(cols, createCol("TABLESPACE_COMMENT", 6165))
+ infSchema["TABLESPACES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLESPACE_NAME", 6165))
+ cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078))
+ infSchema["TABLESPACES_EXTENSIONS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TRIGGER_CATALOG", 6165))
+ cols = append(cols, createCol("TRIGGER_SCHEMA", 6165))
+ cols = append(cols, createCol("TRIGGER_NAME", 6165))
+ cols = append(cols, createCol("EVENT_MANIPULATION", 2074))
+ cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165))
+ cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165))
+ cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165))
+ cols = append(cols, createCol("ACTION_ORDER", 776))
+ cols = append(cols, createCol("ACTION_CONDITION", 10264))
+ cols = append(cols, createCol("ACTION_STATEMENT", 6163))
+ cols = append(cols, createCol("ACTION_ORIENTATION", 6165))
+ cols = append(cols, createCol("ACTION_TIMING", 2074))
+ cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 10264))
+ cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 10264))
+ cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165))
+ cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165))
+ cols = append(cols, createCol("CREATED", 2061))
+ cols = append(cols, createCol("SQL_MODE", 2075))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ cols = append(cols, createCol("DATABASE_COLLATION", 6165))
+ infSchema["TRIGGERS"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("USER", 6167))
+ cols = append(cols, createCol("HOST", 6167))
+ cols = append(cols, createCol("ATTRIBUTE", 6163))
+ infSchema["USER_ATTRIBUTES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("GRANTEE", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("PRIVILEGE_TYPE", 6165))
+ cols = append(cols, createCol("IS_GRANTABLE", 6165))
+ infSchema["USER_PRIVILEGES"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("SPECIFIC_CATALOG", 6165))
+ cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165))
+ cols = append(cols, createCol("SPECIFIC_NAME", 6165))
+ infSchema["VIEW_ROUTINE_USAGE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("VIEW_CATALOG", 6165))
+ cols = append(cols, createCol("VIEW_SCHEMA", 6165))
+ cols = append(cols, createCol("VIEW_NAME", 6165))
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ infSchema["VIEW_TABLE_USAGE"] = cols
+ cols = []vindexes.Column{}
+ cols = append(cols, createCol("TABLE_CATALOG", 6165))
+ cols = append(cols, createCol("TABLE_SCHEMA", 6165))
+ cols = append(cols, createCol("TABLE_NAME", 6165))
+ cols = append(cols, createCol("VIEW_DEFINITION", 6163))
+ cols = append(cols, createCol("CHECK_OPTION", 2074))
+ cols = append(cols, createCol("IS_UPDATABLE", 2074))
+ cols = append(cols, createCol("DEFINER", 6165))
+ cols = append(cols, createCol("SECURITY_TYPE", 6165))
+ cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165))
+ cols = append(cols, createCol("COLLATION_CONNECTION", 6165))
+ infSchema["VIEWS"] = cols
+
+ return infSchema
+}
+
+type infoSchemaWithColumns struct {
+ inner SchemaInformation
+ infoSchemaData map[string][]vindexes.Column
+}
+
+// newSchemaInfo returns a SchemaInformation that has the column information for all info_schema tables
+func newSchemaInfo(inner SchemaInformation) SchemaInformation {
+ version := servenv.MySQLServerVersion()
+ var infoSchema map[string][]vindexes.Column
+ if strings.HasPrefix(version, "5.7") {
+ infoSchema = getInfoSchema57()
+ } else {
+ infoSchema = getInfoSchema80()
+ }
+ return &infoSchemaWithColumns{inner: inner, infoSchemaData: infoSchema}
+}
+
+// FindTableOrVindex implements the SchemaInformation interface
+func (i *infoSchemaWithColumns) FindTableOrVindex(tbl sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) {
+ if !strings.EqualFold(tbl.Qualifier.String(), "information_schema") {
+ return i.inner.FindTableOrVindex(tbl)
+ }
+
+ ks := vindexes.Keyspace{
+ Name: "information_schema",
+ Sharded: false,
+ }
+ cols := i.infoSchemaData[strings.ToUpper(tbl.Name.String())]
+ vtbl := &vindexes.Table{
+ Type: "View",
+ Name: sqlparser.NewIdentifierCS(tbl.Name.String()),
+ Keyspace: &ks,
+ Columns: cols,
+ ColumnListAuthoritative: true,
+ }
+ return vtbl, nil, "", topodatapb.TabletType_UNKNOWN, nil, nil
+}
+
+// ConnCollation implements the SchemaInformation interface
+func (i *infoSchemaWithColumns) ConnCollation() collations.ID {
+ return i.inner.ConnCollation()
+}
diff --git a/go/vt/vtgate/semantics/info_schema_gen_test.go b/go/vt/vtgate/semantics/info_schema_gen_test.go
new file mode 100644
index 00000000000..c5fe0123852
--- /dev/null
+++ b/go/vt/vtgate/semantics/info_schema_gen_test.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semantics
+
+import (
+ "database/sql"
+ "fmt"
+ "regexp"
+ "strings"
+ "testing"
+
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/sqlparser"
+)
+
+func TestGenerateInfoSchemaMap(t *testing.T) {
+ t.Skip("run manually to re-create the content of the getInfoSchema functions")
+ b := new(strings.Builder)
+
+ db, err := sql.Open("mysql", "root@tcp(127.0.0.1:3306)/test")
+ require.NoError(t, err)
+ defer db.Close()
+
+ for _, tbl := range informationSchemaTables80 {
+ b.WriteString("cols = []vindexes.Column{}\n")
+ result, err := db.Query(fmt.Sprintf("show columns from information_schema.`%s`", tbl))
+ require.NoError(t, err)
+ defer result.Close()
+ for result.Next() {
+ var r row
+ result.Scan(&r.Field, &r.Type, &r.Null, &r.Key, &r.Default, &r.Extra)
+ allString := re.FindStringSubmatch(r.Type)
+ var typ string
+ if allString == nil {
+ typ = r.Type
+ } else {
+ typ = allString[1]
+ }
+ unsigned := false
+ if idx := strings.Index(typ, "unsigned"); idx > 0 {
+ typ = typ[:idx-1]
+ unsigned = true
+ }
+ i2 := sqlparser.SQLTypeToQueryType(typ, unsigned)
+ if int(i2) == 0 {
+ t.Fatalf("%s %s", tbl, r.Field)
+ }
+ b.WriteString(fmt.Sprintf("cols = append(cols, createCol(\"%s\", %d))\n", r.Field, int(i2)))
+ }
+ b.WriteString(fmt.Sprintf("infSchema[\"%s\"] = cols\n", tbl))
+ }
+
+ fmt.Println(b.String())
+}
+
+var (
+ informationSchemaTables57 = []string{
+ "CHARACTER_SETS",
+ "COLLATION_CHARACTER_SET_APPLICABILITY",
+ "COLLATIONS",
+ "COLUMN_PRIVILEGES",
+ "COLUMNS",
+ "ENGINES",
+ "EVENTS",
+ "FILES",
+ "GLOBAL_STATUS",
+ "GLOBAL_VARIABLES",
+ "INNODB_BUFFER_PAGE",
+ "INNODB_BUFFER_PAGE_LRU",
+ "INNODB_BUFFER_POOL_STATS",
+ "INNODB_CMP",
+ "INNODB_CMP_PER_INDEX",
+ "INNODB_CMP_PER_INDEX_RESET",
+ "INNODB_CMP_RESET",
+ "INNODB_CMPMEM",
+ "INNODB_CMPMEM_RESET",
+ "INNODB_FT_BEING_DELETED",
+ "INNODB_FT_CONFIG",
+ "INNODB_FT_DEFAULT_STOPWORD",
+ "INNODB_FT_DELETED",
+ "INNODB_FT_INDEX_CACHE",
+ "INNODB_FT_INDEX_TABLE",
+ "INNODB_LOCK_WAITS",
+ "INNODB_LOCKS",
+ "INNODB_METRICS",
+ "INNODB_SYS_COLUMNS",
+ "INNODB_SYS_DATAFILES",
+ "INNODB_SYS_FIELDS",
+ "INNODB_SYS_FOREIGN",
+ "INNODB_SYS_FOREIGN_COLS",
+ "INNODB_SYS_INDEXES",
+ "INNODB_SYS_TABLES",
+ "INNODB_SYS_TABLESPACES",
+ "INNODB_SYS_TABLESTATS",
+ "INNODB_SYS_VIRTUAL",
+ "INNODB_TEMP_TABLE_INFO",
+ "INNODB_TRX",
+ "KEY_COLUMN_USAGE",
+ "OPTIMIZER_TRACE",
+ "PARAMETERS",
+ "PARTITIONS",
+ "PLUGINS",
+ "PROCESSLIST",
+ "PROFILING",
+ "REFERENTIAL_CONSTRAINTS",
+ "ROUTINES",
+ "SCHEMA_PRIVILEGES",
+ "SCHEMATA",
+ "SESSION_STATUS",
+ "SESSION_VARIABLES",
+ "STATISTICS",
+ "TABLE_CONSTRAINTS",
+ "TABLE_PRIVILEGES",
+ "TABLES",
+ "TABLESPACES",
+ "TRIGGERS",
+ "USER_PRIVILEGES",
+ "VIEWS",
+ }
+ informationSchemaTables80 = []string{
+ "ADMINISTRABLE_ROLE_AUTHORIZATIONS",
+ "APPLICABLE_ROLES",
+ "CHARACTER_SETS",
+ "CHECK_CONSTRAINTS",
+ "COLLATION_CHARACTER_SET_APPLICABILITY",
+ "COLLATIONS",
+ "COLUMN_PRIVILEGES",
+ "COLUMN_STATISTICS",
+ "COLUMNS",
+ "COLUMNS_EXTENSIONS",
+ "ENABLED_ROLES",
+ "ENGINES",
+ "EVENTS",
+ "FILES",
+ "INNODB_BUFFER_PAGE",
+ "INNODB_BUFFER_PAGE_LRU",
+ "INNODB_BUFFER_POOL_STATS",
+ "INNODB_CACHED_INDEXES",
+ "INNODB_CMP",
+ "INNODB_CMP_PER_INDEX",
+ "INNODB_CMP_PER_INDEX_RESET",
+ "INNODB_CMP_RESET",
+ "INNODB_CMPMEM",
+ "INNODB_CMPMEM_RESET",
+ "INNODB_COLUMNS",
+ "INNODB_DATAFILES",
+ "INNODB_FIELDS",
+ "INNODB_FOREIGN",
+ "INNODB_FOREIGN_COLS",
+ "INNODB_FT_BEING_DELETED",
+ "INNODB_FT_CONFIG",
+ "INNODB_FT_DEFAULT_STOPWORD",
+ "INNODB_FT_DELETED",
+ "INNODB_FT_INDEX_CACHE",
+ "INNODB_FT_INDEX_TABLE",
+ "INNODB_INDEXES",
+ "INNODB_METRICS",
+ "INNODB_SESSION_TEMP_TABLESPACES",
+ "INNODB_TABLES",
+ "INNODB_TABLESPACES",
+ "INNODB_TABLESPACES_BRIEF",
+ "INNODB_TABLESTATS",
+ "INNODB_TEMP_TABLE_INFO",
+ "INNODB_TRX",
+ "INNODB_VIRTUAL",
+ "KEY_COLUMN_USAGE",
+ "KEYWORDS",
+ "OPTIMIZER_TRACE",
+ "PARAMETERS",
+ "PARTITIONS",
+ "PLUGINS",
+ "PROCESSLIST",
+ "PROFILING",
+ "REFERENTIAL_CONSTRAINTS",
+ "RESOURCE_GROUPS",
+ "ROLE_COLUMN_GRANTS",
+ "ROLE_ROUTINE_GRANTS",
+ "ROLE_TABLE_GRANTS",
+ "ROUTINES",
+ "SCHEMA_PRIVILEGES",
+ "SCHEMATA",
+ "SCHEMATA_EXTENSIONS",
+ "ST_GEOMETRY_COLUMNS",
+ "ST_SPATIAL_REFERENCE_SYSTEMS",
+ "ST_UNITS_OF_MEASURE",
+ "STATISTICS",
+ "TABLE_CONSTRAINTS",
+ "TABLE_CONSTRAINTS_EXTENSIONS",
+ "TABLE_PRIVILEGES",
+ "TABLES",
+ "TABLES_EXTENSIONS",
+ "TABLESPACES",
+ "TABLESPACES_EXTENSIONS",
+ "TRIGGERS",
+ "USER_ATTRIBUTES",
+ "USER_PRIVILEGES",
+ "VIEW_ROUTINE_USAGE",
+ "VIEW_TABLE_USAGE",
+ "VIEWS",
+ }
+)
+
+type row struct {
+ Field string
+ Type string
+ Null string
+ Key any
+ Default any
+ Extra any
+}
+
+var re = regexp.MustCompile(`(.*)\((.*)\)`)
diff --git a/go/vt/vtgate/semantics/scoper.go b/go/vt/vtgate/semantics/scoper.go
index 5d5f1c85102..349d131e725 100644
--- a/go/vt/vtgate/semantics/scoper.go
+++ b/go/vt/vtgate/semantics/scoper.go
@@ -46,6 +46,7 @@ type (
tables []TableInfo
isUnion bool
joinUsing map[string]TableSet
+ stmtScope bool
}
)
@@ -62,11 +63,13 @@ func (s *scoper) down(cursor *sqlparser.Cursor) error {
switch node := node.(type) {
case *sqlparser.Update, *sqlparser.Delete:
currScope := newScope(s.currentScope())
+ currScope.stmtScope = true
s.push(currScope)
currScope.stmt = node.(sqlparser.Statement)
case *sqlparser.Select:
currScope := newScope(s.currentScope())
+ currScope.stmtScope = true
s.push(currScope)
// Needed for order by with Literal to find the Expression.
@@ -77,10 +80,10 @@ func (s *scoper) down(cursor *sqlparser.Cursor) error {
case sqlparser.TableExpr:
if isParentSelect(cursor) {
// when checking the expressions used in JOIN conditions, special rules apply where the ON expression
- // can only see the two tables involved in the JOIN, and no other tables.
- // To create this special context, we create a special scope here that is then merged with
- // the surrounding scope when we come back out from the JOIN
- nScope := newScope(nil)
+ // can only see the two tables involved in the JOIN, and no other tables of that select statement.
+ // They are allowed to see the tables of the outer select query.
+ // To create this special context, we will find the parent scope of the select statement involved.
+ nScope := newScope(s.currentScope().findParentScopeOfStatement())
nScope.stmt = cursor.Parent().(*sqlparser.Select)
s.push(nScope)
}
@@ -289,3 +292,14 @@ func (s *scope) prepareUsingMap() (result map[TableSet]map[string]TableSet) {
}
return
}
+
+// findParentScopeOfStatement finds the scope that belongs to a statement.
+func (s *scope) findParentScopeOfStatement() *scope {
+ if s.stmtScope {
+ return s.parent
+ }
+ if s.parent == nil {
+ return nil
+ }
+ return s.parent.findParentScopeOfStatement()
+}
diff --git a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go
index c96fbc4d811..417a25b3405 100644
--- a/go/vt/vtgate/semantics/semantic_state.go
+++ b/go/vt/vtgate/semantics/semantic_state.go
@@ -116,8 +116,8 @@ type (
)
var (
- // ErrMultipleTables refers to an error happening when something should be used only for single tables
- ErrMultipleTables = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] should only be used for single tables")
+ // ErrNotSingleTable refers to an error happening when something should be used only for single tables
+ ErrNotSingleTable = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] should only be used for single tables")
)
// CopyDependencies copies the dependencies from one expression into the other
@@ -142,7 +142,7 @@ func (st *SemTable) TableSetFor(t *sqlparser.AliasedTableExpr) TableSet {
return SingleTableSet(idx)
}
}
- return TableSet{}
+ return EmptyTableSet()
}
// ReplaceTableSetFor replaces the given single TabletSet with the new *sqlparser.AliasedTableExpr
@@ -169,7 +169,7 @@ func (st *SemTable) ReplaceTableSetFor(id TableSet, t *sqlparser.AliasedTableExp
func (st *SemTable) TableInfoFor(id TableSet) (TableInfo, error) {
offset := id.TableOffset()
if offset < 0 {
- return nil, ErrMultipleTables
+ return nil, ErrNotSingleTable
}
return st.Tables[offset], nil
}
@@ -287,12 +287,12 @@ func (d ExprDependencies) dependencies(expr sqlparser.Expr) (deps TableSet) {
if extracted, ok := expr.(*sqlparser.ExtractedSubquery); ok {
if extracted.OtherSide != nil {
set := d.dependencies(extracted.OtherSide)
- deps.MergeInPlace(set)
+ deps = deps.Merge(set)
}
return false, nil
}
set, found := d[expr]
- deps.MergeInPlace(set)
+ deps = deps.Merge(set)
// if we found a cached value, there is no need to continue down to visit children
return !found, nil
@@ -361,7 +361,7 @@ var _ evalengine.TranslationLookup = (*SemTable)(nil)
var columnNotSupportedErr = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "column access not supported here")
// ColumnLookup implements the TranslationLookup interface
-func (st *SemTable) ColumnLookup(col *sqlparser.ColName) (int, error) {
+func (st *SemTable) ColumnLookup(*sqlparser.ColName) (int, error) {
return 0, columnNotSupportedErr
}
@@ -372,7 +372,7 @@ func (st *SemTable) SingleUnshardedKeyspace() (*vindexes.Keyspace, []*vindexes.T
for _, table := range st.Tables {
vindexTable := table.GetVindexTable()
- if vindexTable == nil || vindexTable.Type != "" {
+ if vindexTable == nil {
_, isDT := table.getExpr().Expr.(*sqlparser.DerivedTable)
if isDT {
// derived tables are ok, as long as all real tables are from the same unsharded keyspace
@@ -381,6 +381,13 @@ func (st *SemTable) SingleUnshardedKeyspace() (*vindexes.Keyspace, []*vindexes.T
}
return nil, nil
}
+ if vindexTable.Type != "" {
+ // A reference table is not an issue when checking whether a query targets a single unsharded keyspace
+ if vindexTable.Type == vindexes.TypeReference {
+ continue
+ }
+ return nil, nil
+ }
name, ok := table.getExpr().Expr.(sqlparser.TableName)
if !ok {
return nil, nil
diff --git a/go/vt/vtgate/semantics/table_collector.go b/go/vt/vtgate/semantics/table_collector.go
index dc6d1369c80..8266287abe9 100644
--- a/go/vt/vtgate/semantics/table_collector.go
+++ b/go/vt/vtgate/semantics/table_collector.go
@@ -84,19 +84,16 @@ func (tc *tableCollector) up(cursor *sqlparser.Cursor) error {
case sqlparser.TableName:
var tbl *vindexes.Table
var vindex vindexes.Vindex
- var isInfSchema bool
- if sqlparser.SystemSchema(t.Qualifier.String()) {
- isInfSchema = true
- } else {
- var err error
- tbl, vindex, _, _, _, err = tc.si.FindTableOrVindex(t)
- if err != nil {
- return err
- }
- if tbl == nil && vindex != nil {
- tbl = newVindexTable(t.Name)
- }
+ isInfSchema := sqlparser.SystemSchema(t.Qualifier.String())
+ var err error
+ tbl, vindex, _, _, _, err = tc.si.FindTableOrVindex(t)
+ if err != nil {
+ return err
}
+ if tbl == nil && vindex != nil {
+ tbl = newVindexTable(t.Name)
+ }
+
scope := tc.scoper.currentScope()
tableInfo := tc.createTable(t, node, tbl, isInfSchema, vindex)
@@ -138,7 +135,7 @@ func (tc *tableCollector) tableSetFor(t *sqlparser.AliasedTableExpr) TableSet {
func (tc *tableCollector) tableInfoFor(id TableSet) (TableInfo, error) {
offset := id.TableOffset()
if offset < 0 {
- return nil, ErrMultipleTables
+ return nil, ErrNotSingleTable
}
return tc.Tables[offset], nil
}
diff --git a/go/vt/vtgate/semantics/table_set.go b/go/vt/vtgate/semantics/table_set.go
new file mode 100644
index 00000000000..aa9042f9587
--- /dev/null
+++ b/go/vt/vtgate/semantics/table_set.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semantics
+
+import (
+ "fmt"
+
+ "vitess.io/vitess/go/vt/vtgate/semantics/bitset"
+)
+
+// TableSet is how a set of tables is expressed.
+// Tables get unique bits assigned in the order that they are encountered during semantic analysis.
+type TableSet bitset.Bitset
+
+// Format formats the TableSet.
+func (ts TableSet) Format(f fmt.State, _ rune) {
+ first := true
+ fmt.Fprintf(f, "TableSet{")
+ bitset.Bitset(ts).ForEach(func(tid int) {
+ if first {
+ fmt.Fprintf(f, "%d", tid)
+ first = false
+ } else {
+ fmt.Fprintf(f, ",%d", tid)
+ }
+ })
+ fmt.Fprintf(f, "}")
+}
+
+// IsOverlapping returns true if at least one table exists in both sets
+func (ts TableSet) IsOverlapping(other TableSet) bool {
+ return bitset.Bitset(ts).Overlaps(bitset.Bitset(other))
+}
+
+// IsSolvedBy returns true if all of `ts` is contained in `other`
+func (ts TableSet) IsSolvedBy(other TableSet) bool {
+ return bitset.Bitset(ts).IsContainedBy(bitset.Bitset(other))
+}
+
+// NumberOfTables returns the number of bits set
+func (ts TableSet) NumberOfTables() int {
+ return bitset.Bitset(ts).Popcount()
+}
+
+// NonEmpty returns true if there are tables in the tableset
+func (ts TableSet) NonEmpty() bool {
+ return !ts.IsEmpty()
+}
+
+// IsEmpty returns true if there are no tables in the tableset
+func (ts TableSet) IsEmpty() bool {
+ return len(ts) == 0
+}
+
+// TableOffset returns the offset in the Tables array from TableSet
+func (ts TableSet) TableOffset() int {
+ return bitset.Bitset(ts).SingleBit()
+}
+
+// ForEachTable calls the given callback with the indices for all tables in this TableSet
+func (ts TableSet) ForEachTable(callback func(int)) {
+ bitset.Bitset(ts).ForEach(callback)
+}
+
+// Constituents returns a slice with the indices for all tables in this TableSet
+func (ts TableSet) Constituents() (result []TableSet) {
+ ts.ForEachTable(func(t int) {
+ result = append(result, SingleTableSet(t))
+ })
+ return
+}
+
+// Merge creates a TableSet that contains both inputs
+func (ts TableSet) Merge(other TableSet) TableSet {
+ return TableSet(bitset.Bitset(ts).Or(bitset.Bitset(other)))
+}
+
+// Remove returns a new TableSet with all the tables in `other` removed
+func (ts TableSet) Remove(other TableSet) TableSet {
+ return TableSet(bitset.Bitset(ts).AndNot(bitset.Bitset(other)))
+}
+
+// KeepOnly removes all the tables not in `other` from this TableSet
+func (ts TableSet) KeepOnly(other TableSet) TableSet {
+ return TableSet(bitset.Bitset(ts).And(bitset.Bitset(other)))
+}
+
+// WithTable returns a new TableSet that contains this table too
+func (ts TableSet) WithTable(tableidx int) TableSet {
+ return TableSet(bitset.Bitset(ts).Set(tableidx))
+}
+
+// SingleTableSet creates a TableSet that contains only the given table
+func SingleTableSet(tableidx int) TableSet {
+ return TableSet(bitset.Single(tableidx))
+}
+
+// EmptyTableSet creates an empty TableSet
+func EmptyTableSet() TableSet {
+ return ""
+}
+
+// MergeTableSets merges all the given TableSet into a single one
+func MergeTableSets(tss ...TableSet) TableSet {
+ var result bitset.Bitset
+ for _, t := range tss {
+ result = result.Or(bitset.Bitset(t))
+ }
+ return TableSet(result)
+}
+
+// TableSetFromIds returns a TableSet containing all the table ids passed as arguments.
+func TableSetFromIds(tids ...int) (ts TableSet) {
+ return TableSet(bitset.Build(tids...))
+}
diff --git a/go/vt/vtgate/semantics/tabletset_test.go b/go/vt/vtgate/semantics/table_set_test.go
similarity index 93%
rename from go/vt/vtgate/semantics/tabletset_test.go
rename to go/vt/vtgate/semantics/table_set_test.go
index 50e437220b6..03d0d91cc9e 100644
--- a/go/vt/vtgate/semantics/tabletset_test.go
+++ b/go/vt/vtgate/semantics/table_set_test.go
@@ -52,7 +52,7 @@ func TestTableSet_Constituents(t *testing.T) {
assert.Equal(t, []TableSet{F1, F2}, (F12).Constituents())
assert.Equal(t, []TableSet{F1, F3}, (F1.Merge(F3)).Constituents())
assert.Equal(t, []TableSet{F2, F3}, (F2.Merge(F3)).Constituents())
- assert.Empty(t, TableSet{}.Constituents())
+ assert.Empty(t, EmptyTableSet().Constituents())
}
func TestTableSet_TableOffset(t *testing.T) {
@@ -71,7 +71,7 @@ func TestTableSet_LargeTablesConstituents(t *testing.T) {
for t := 0; t < 256; t++ {
table += rand.Intn(GapSize) + 1
expected = append(expected, SingleTableSet(table))
- ts.AddTable(table)
+ ts = ts.WithTable(table)
}
assert.Equal(t, expected, ts.Constituents())
@@ -84,17 +84,15 @@ func TestTabletSet_LargeMergeInPlace(t *testing.T) {
var tablesets = make([]TableSet, 64)
for i := range tablesets {
- ts := &tablesets[i]
setrng := i * SetRange
-
for tid := 0; tid < SetRange; tid++ {
- ts.AddTable(setrng + tid)
+ tablesets[i] = tablesets[i].WithTable(setrng + tid)
}
}
var result TableSet
for _, ts := range tablesets {
- result.MergeInPlace(ts)
+ result = result.Merge(ts)
}
var expected = make([]TableSet, SetRange*Blocks)
@@ -112,11 +110,9 @@ func TestTabletSet_LargeMerge(t *testing.T) {
var tablesets = make([]TableSet, 64)
for i := range tablesets {
- ts := &tablesets[i]
setrng := i * SetRange
-
for tid := 0; tid < SetRange; tid++ {
- ts.AddTable(setrng + tid)
+ tablesets[i] = tablesets[i].WithTable(setrng + tid)
}
}
@@ -172,8 +168,8 @@ func TestTableSet_KeepOnly(t *testing.T) {
for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) {
- testcase.ts1.KeepOnly(testcase.ts2)
- assert.Equal(t, testcase.result, testcase.ts1)
+ keep := testcase.ts1.KeepOnly(testcase.ts2)
+ assert.Equal(t, testcase.result, keep)
})
}
}
@@ -210,8 +206,8 @@ func TestTableSet_RemoveInPlace(t *testing.T) {
for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) {
- testcase.ts1.RemoveInPlace(testcase.ts2)
- assert.Equal(t, testcase.result, testcase.ts1)
+ remove := testcase.ts1.Remove(testcase.ts2)
+ assert.Equal(t, testcase.result, remove)
})
}
}
diff --git a/go/vt/vtgate/semantics/tabletset.go b/go/vt/vtgate/semantics/tabletset.go
deleted file mode 100644
index 422579d7293..00000000000
--- a/go/vt/vtgate/semantics/tabletset.go
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package semantics
-
-import (
- "fmt"
- "math/bits"
-)
-
-type largeTableSet struct {
- tables []uint64
-}
-
-func (ts *largeTableSet) overlapsSmall(small uint64) bool {
- return ts.tables[0]&small != 0
-}
-
-func minlen(a, b []uint64) int {
- if len(a) < len(b) {
- return len(a)
- }
- return len(b)
-}
-
-func (ts *largeTableSet) overlaps(b *largeTableSet) bool {
- min := minlen(ts.tables, b.tables)
- for t := 0; t < min; t++ {
- if ts.tables[t]&b.tables[t] != 0 {
- return true
- }
- }
- return false
-}
-
-func (ts *largeTableSet) containsSmall(small uint64) bool {
- return small&ts.tables[0] == small
-}
-
-func (ts *largeTableSet) isContainedBy(b *largeTableSet) bool {
- if len(ts.tables) > len(b.tables) {
- return false
- }
- for i, t := range ts.tables {
- if t&b.tables[i] != t {
- return false
- }
- }
- return true
-}
-
-func (ts *largeTableSet) popcount() (count int) {
- for _, t := range ts.tables {
- count += bits.OnesCount64(t)
- }
- return
-}
-
-func (ts *largeTableSet) merge(other *largeTableSet) *largeTableSet {
- small, large := ts.tables, other.tables
- if len(small) > len(large) {
- small, large = large, small
- }
-
- merged := make([]uint64, len(large))
- m := 0
-
- for m < len(small) {
- merged[m] = small[m] | large[m]
- m++
- }
- for m < len(large) {
- merged[m] = large[m]
- m++
- }
-
- return &largeTableSet{merged}
-}
-
-func (ts *largeTableSet) mergeSmall(small uint64) *largeTableSet {
- merged := make([]uint64, len(ts.tables))
- copy(merged, ts.tables)
- merged[0] |= small
- return &largeTableSet{merged}
-}
-
-func (ts *largeTableSet) mergeInPlace(other *largeTableSet) {
- if len(other.tables) > len(ts.tables) {
- merged := make([]uint64, len(other.tables))
- copy(merged, ts.tables)
- ts.tables = merged
- }
- for i := range other.tables {
- ts.tables[i] |= other.tables[i]
- }
-}
-
-func (ts *largeTableSet) mergeSmallInPlace(small uint64) {
- ts.tables[0] |= small
-}
-
-func (ts *largeTableSet) tableOffset() (offset int) {
- var found bool
- for chunk, t := range ts.tables {
- if t == 0 {
- continue
- }
- if found || bits.OnesCount64(t) != 1 {
- return -1
- }
- offset = chunk*64 + bits.TrailingZeros64(t)
- found = true
- }
- return
-}
-
-func (ts *largeTableSet) add(tableidx int) {
- chunk := tableidx / 64
- offset := tableidx % 64
-
- if len(ts.tables) <= chunk {
- tables := make([]uint64, chunk+1)
- copy(tables, ts.tables)
- ts.tables = tables
- }
-
- ts.tables[chunk] |= 1 << offset
-}
-
-func (ts *largeTableSet) foreach(callback func(int)) {
- for idx, bitset := range ts.tables {
- for bitset != 0 {
- t := bitset & -bitset
- r := bits.TrailingZeros64(bitset)
- callback(idx*64 + r)
- bitset ^= t
- }
- }
-}
-
-func newLargeTableSet(small uint64, tableidx int) *largeTableSet {
- chunk := tableidx / 64
- offset := tableidx % 64
-
- tables := make([]uint64, chunk+1)
- tables[0] = small
- tables[chunk] |= 1 << offset
-
- return &largeTableSet{tables}
-}
-
-// TableSet is how a set of tables is expressed.
-// Tables get unique bits assigned in the order that they are encountered during semantic analysis.
-// This TableSet implementation is optimized for sets of less than 64 tables, but can grow to support an arbitrary
-// large amount of tables.
-type TableSet struct {
- small uint64
- large *largeTableSet
-}
-
-// Format formats the TableSet.
-func (ts TableSet) Format(f fmt.State, verb rune) {
- first := true
- fmt.Fprintf(f, "TableSet{")
- ts.ForEachTable(func(tid int) {
- if first {
- fmt.Fprintf(f, "%d", tid)
- first = false
- } else {
- fmt.Fprintf(f, ",%d", tid)
- }
- })
- fmt.Fprintf(f, "}")
-}
-
-// IsOverlapping returns true if at least one table exists in both sets
-func (ts TableSet) IsOverlapping(other TableSet) bool {
- switch {
- case ts.large == nil && other.large == nil:
- return ts.small&other.small != 0
- case ts.large == nil:
- return other.large.overlapsSmall(ts.small)
- case other.large == nil:
- return ts.large.overlapsSmall(other.small)
- default:
- return ts.large.overlaps(other.large)
- }
-}
-
-// IsSolvedBy returns true if all of `ts` is contained in `other`
-func (ts TableSet) IsSolvedBy(other TableSet) bool {
- switch {
- case ts.large == nil && other.large == nil:
- return ts.small&other.small == ts.small
- case ts.large == nil:
- return other.large.containsSmall(ts.small)
- case other.large == nil:
- // if we're a large table and other is not, we cannot be contained by other
- return false
- default:
- return ts.large.isContainedBy(other.large)
- }
-}
-
-// Equals returns true if `ts` and `other` contain the same tables
-func (ts TableSet) Equals(other TableSet) bool {
- return ts.IsSolvedBy(other) && other.IsSolvedBy(ts)
-}
-
-// NumberOfTables returns the number of bits set
-func (ts TableSet) NumberOfTables() int {
- if ts.large == nil {
- return bits.OnesCount64(ts.small)
- }
- return ts.large.popcount()
-}
-
-// TableOffset returns the offset in the Tables array from TableSet
-func (ts TableSet) TableOffset() int {
- if ts.large == nil {
- if bits.OnesCount64(ts.small) != 1 {
- return -1
- }
- return bits.TrailingZeros64(ts.small)
- }
- return ts.large.tableOffset()
-}
-
-// ForEachTable calls the given callback with the indices for all tables in this TableSet
-func (ts TableSet) ForEachTable(callback func(int)) {
- if ts.large == nil {
- bitset := ts.small
- for bitset != 0 {
- t := bitset & -bitset
- callback(bits.TrailingZeros64(bitset))
- bitset ^= t
- }
- } else {
- ts.large.foreach(callback)
- }
-}
-
-// Constituents returns a slice with the indices for all tables in this TableSet
-func (ts TableSet) Constituents() (result []TableSet) {
- ts.ForEachTable(func(t int) {
- result = append(result, SingleTableSet(t))
- })
- return
-}
-
-// Merge creates a TableSet that contains both inputs
-func (ts TableSet) Merge(other TableSet) TableSet {
- switch {
- case ts.large == nil && other.large == nil:
- return TableSet{small: ts.small | other.small}
- case ts.large == nil:
- return TableSet{large: other.large.mergeSmall(ts.small)}
- case other.large == nil:
- return TableSet{large: ts.large.mergeSmall(other.small)}
- default:
- return TableSet{large: ts.large.merge(other.large)}
- }
-}
-
-// MergeInPlace merges all the tables in `other` into this TableSet
-func (ts *TableSet) MergeInPlace(other TableSet) {
- switch {
- case ts.large == nil && other.large == nil:
- ts.small |= other.small
- case ts.large == nil:
- ts.large = other.large.mergeSmall(ts.small)
- case other.large == nil:
- ts.large.mergeSmallInPlace(other.small)
- default:
- ts.large.mergeInPlace(other.large)
- }
-}
-
-// RemoveInPlace removes all the tables in `other` from this TableSet
-func (ts *TableSet) RemoveInPlace(other TableSet) {
- switch {
- case ts.large == nil && other.large == nil:
- ts.small &= ^other.small
- case ts.large == nil:
- ts.small &= ^other.large.tables[0]
- case other.large == nil:
- ts.large.tables[0] &= ^other.small
- default:
- for idx := range ts.large.tables {
- if len(other.large.tables) <= idx {
- break
- }
- ts.large.tables[idx] &= ^other.large.tables[idx]
- }
- }
-}
-
-// KeepOnly removes all the tables not in `other` from this TableSet
-func (ts *TableSet) KeepOnly(other TableSet) {
- switch {
- case ts.large == nil && other.large == nil:
- ts.small &= other.small
- case ts.large == nil:
- ts.small &= other.large.tables[0]
- case other.large == nil:
- ts.small = ts.large.tables[0] & other.small
- ts.large = nil
- default:
- for idx := range ts.large.tables {
- if len(other.large.tables) <= idx {
- ts.large.tables = ts.large.tables[0:idx]
- break
- }
- ts.large.tables[idx] &= other.large.tables[idx]
- }
- }
-}
-
-// AddTable adds the given table to this set
-func (ts *TableSet) AddTable(tableidx int) {
- switch {
- case ts.large == nil && tableidx < 64:
- ts.small |= 1 << tableidx
- case ts.large == nil:
- ts.large = newLargeTableSet(ts.small, tableidx)
- default:
- ts.large.add(tableidx)
- }
-}
-
-// SingleTableSet creates a TableSet that contains only the given table
-func SingleTableSet(tableidx int) TableSet {
- if tableidx < 64 {
- return TableSet{small: 1 << tableidx}
- }
- return TableSet{large: newLargeTableSet(0x0, tableidx)}
-}
-
-// EmptyTableSet creates an empty TableSet
-func EmptyTableSet() TableSet {
- return TableSet{small: 0}
-}
-
-// MergeTableSets merges all the given TableSet into a single one
-func MergeTableSets(tss ...TableSet) (result TableSet) {
- for _, t := range tss {
- result.MergeInPlace(t)
- }
- return
-}
-
-// TableSetFromIds returns TableSet for all the id passed in argument.
-func TableSetFromIds(tids ...int) (ts TableSet) {
- for _, tid := range tids {
- ts.AddTable(tid)
- }
- return
-}
diff --git a/go/vt/vtgate/semantics/vtable.go b/go/vt/vtgate/semantics/vtable.go
index 50d13d57193..f08e529c710 100644
--- a/go/vt/vtgate/semantics/vtable.go
+++ b/go/vt/vtgate/semantics/vtable.go
@@ -26,10 +26,11 @@ import (
// vTableInfo is used to represent projected results, not real tables. It is used for
// ORDER BY, GROUP BY and HAVING that need to access result columns
type vTableInfo struct {
- tableName string
- columnNames []string
- cols []sqlparser.Expr
- tables TableSet
+ tableName string
+ columnNames []string
+ cols []sqlparser.Expr
+ tables TableSet
+ isAuthoritative bool
}
var _ TableInfo = (*vTableInfo)(nil)
@@ -62,7 +63,7 @@ func (v *vTableInfo) matches(name sqlparser.TableName) bool {
}
func (v *vTableInfo) authoritative() bool {
- return true
+ return v.isAuthoritative
}
func (v *vTableInfo) Name() (sqlparser.TableName, error) {
@@ -89,7 +90,7 @@ func (v *vTableInfo) getColumns() []ColumnInfo {
}
func (v *vTableInfo) hasStar() bool {
- return v.tables.NumberOfTables() > 0
+ return v.tables.NonEmpty()
}
// GetTables implements the TableInfo interface
@@ -108,11 +109,12 @@ func (v *vTableInfo) getExprFor(s string) (sqlparser.Expr, error) {
}
func createVTableInfoForExpressions(expressions sqlparser.SelectExprs, tables []TableInfo, org originable) *vTableInfo {
- cols, colNames, ts := selectExprsToInfos(expressions, tables, org)
+ cols, colNames, ts, isAuthoritative := selectExprsToInfos(expressions, tables, org)
return &vTableInfo{
- columnNames: colNames,
- cols: cols,
- tables: ts,
+ columnNames: colNames,
+ cols: cols,
+ tables: ts,
+ isAuthoritative: isAuthoritative,
}
}
@@ -120,7 +122,8 @@ func selectExprsToInfos(
expressions sqlparser.SelectExprs,
tables []TableInfo,
org originable,
-) (cols []sqlparser.Expr, colNames []string, ts TableSet) {
+) (cols []sqlparser.Expr, colNames []string, ts TableSet, isAuthoritative bool) {
+ isAuthoritative = true
for _, selectExpr := range expressions {
switch expr := selectExpr.(type) {
case *sqlparser.AliasedExpr:
@@ -138,7 +141,10 @@ func selectExprsToInfos(
}
case *sqlparser.StarExpr:
for _, table := range tables {
- ts.MergeInPlace(table.getTableSet(org))
+ ts = ts.Merge(table.getTableSet(org))
+ if !table.authoritative() {
+ isAuthoritative = false
+ }
}
}
}
diff --git a/go/vt/vtgate/simplifier/simplifier.go b/go/vt/vtgate/simplifier/simplifier.go
index 5da8fe945fa..ef7be4e30e5 100644
--- a/go/vt/vtgate/simplifier/simplifier.go
+++ b/go/vt/vtgate/simplifier/simplifier.go
@@ -219,14 +219,14 @@ func removeTable(clone sqlparser.SelectStatement, searchedTS semantics.TableSet,
lft, ok := node.LeftExpr.(*sqlparser.AliasedTableExpr)
if ok {
ts := semTable.TableSetFor(lft)
- if searchedTS.Equals(ts) {
+ if searchedTS == ts {
cursor.Replace(node.RightExpr)
}
}
rgt, ok := node.RightExpr.(*sqlparser.AliasedTableExpr)
if ok {
ts := semTable.TableSetFor(rgt)
- if searchedTS.Equals(ts) {
+ if searchedTS == ts {
cursor.Replace(node.LeftExpr)
}
}
@@ -242,7 +242,7 @@ func removeTable(clone sqlparser.SelectStatement, searchedTS semantics.TableSet,
lft, ok := tbl.(*sqlparser.AliasedTableExpr)
if ok {
ts := semTable.TableSetFor(lft)
- if searchedTS.Equals(ts) {
+ if searchedTS == ts {
node.From = append(node.From[:i], node.From[i+1:]...)
return true
}
diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go
index 6b4efde2768..98867424770 100644
--- a/go/vt/vtgate/tabletgateway.go
+++ b/go/vt/vtgate/tabletgateway.go
@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"math/rand"
+ "net/http"
"sort"
"sync"
"sync/atomic"
@@ -35,6 +36,7 @@ import (
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/balancer"
"vitess.io/vitess/go/vt/vtgate/buffer"
"vitess.io/vitess/go/vt/vttablet/queryservice"
@@ -51,7 +53,13 @@ var (
bufferImplementation = "keyspace_events"
initialTabletTimeout = 30 * time.Second
// retryCount is the number of times a query will be retried on error
- retryCount = 2
+ retryCount = 2
+ routeReplicaToRdonly bool
+
+ // configuration flags for the tablet balancer
+ balancerEnabled bool
+ balancerVtgateCells []string
+ balancerKeyspaces []string
)
func init() {
@@ -60,6 +68,10 @@ func init() {
fs.StringVar(&bufferImplementation, "buffer_implementation", "keyspace_events", "Allowed values: healthcheck (legacy implementation), keyspace_events (default)")
fs.DurationVar(&initialTabletTimeout, "gateway_initial_tablet_timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type")
fs.IntVar(&retryCount, "retry-count", 2, "retry count")
+ fs.BoolVar(&routeReplicaToRdonly, "gateway_route_replica_to_rdonly", false, "route REPLICA queries to RDONLY tablets as well as REPLICA tablets")
+ fs.BoolVar(&balancerEnabled, "enable-balancer", false, "Enable the tablet balancer to evenly spread query load for a given tablet type")
+ fs.StringSliceVar(&balancerVtgateCells, "balancer-vtgate-cells", []string{}, "When in balanced mode, a comma-separated list of cells that contain vtgates (required)")
+ fs.StringSliceVar(&balancerKeyspaces, "balancer-keyspaces", []string{}, "When in balanced mode, a comma-separated list of keyspaces for which to use the balancer (optional)")
})
}
@@ -82,10 +94,17 @@ type TabletGateway struct {
// buffer, if enabled, buffers requests during a detected PRIMARY failover.
buffer *buffer.Buffer
+
+ // balancer used for routing to tablets
+ balancer balancer.TabletBalancer
}
func createHealthCheck(ctx context.Context, retryDelay, timeout time.Duration, ts *topo.Server, cell, cellsToWatch string) discovery.HealthCheck {
- return discovery.NewHealthCheck(ctx, retryDelay, timeout, ts, cell, cellsToWatch)
+ filters, err := discovery.NewVTGateHealthCheckFilters()
+ if err != nil {
+ log.Exit(err)
+ }
+ return discovery.NewHealthCheck(ctx, retryDelay, timeout, ts, cell, cellsToWatch, filters)
}
// NewTabletGateway creates and returns a new TabletGateway
@@ -110,6 +129,9 @@ func NewTabletGateway(ctx context.Context, hc discovery.HealthCheck, serv srvtop
statusAggregators: make(map[string]*TabletStatusAggregator),
}
gw.setupBuffering(ctx)
+ if balancerEnabled {
+ gw.setupBalancer(ctx)
+ }
gw.QueryService = queryservice.Wrap(nil, gw.withRetry)
return gw
}
@@ -169,6 +191,13 @@ func (gw *TabletGateway) setupBuffering(ctx context.Context) {
}
}
+func (gw *TabletGateway) setupBalancer(ctx context.Context) {
+ if len(balancerVtgateCells) == 0 {
+ log.Exitf("balancer_vtgate_cells is required for balanced mode")
+ }
+ gw.balancer = balancer.NewTabletBalancer(gw.localCell, balancerVtgateCells)
+}
+
// QueryServiceByAlias satisfies the Gateway interface
func (gw *TabletGateway) QueryServiceByAlias(alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) {
qs, err := gw.hc.TabletConnection(alias, target)
@@ -207,7 +236,7 @@ func (gw *TabletGateway) WaitForTablets(tabletTypesToWait []topodatapb.TabletTyp
}
// Finds the targets to look for.
- targets, err := srvtopo.FindAllTargets(ctx, gw.srvTopoServer, gw.localCell, tabletTypesToWait)
+ targets, err := srvtopo.FindAllTargets(ctx, gw.srvTopoServer, gw.localCell, discovery.KeyspacesToWatch, tabletTypesToWait)
if err != nil {
return err
}
@@ -234,6 +263,15 @@ func (gw *TabletGateway) CacheStatus() TabletCacheStatusList {
return res
}
+func (gw *TabletGateway) DebugBalancerHandler(w http.ResponseWriter, r *http.Request) {
+ if balancerEnabled {
+ gw.balancer.DebugHandler(w, r)
+ } else {
+ w.Header().Set("Content-Type", "text/plain")
+ w.Write([]byte("not enabled"))
+ }
+}
+
// withRetry gets available connections and executes the action. If there are retryable errors,
// it retries retryCount times before failing. It does not retry if the connection is in
// the middle of a transaction. While returning the error check if it maybe a result of
@@ -293,6 +331,20 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target,
}
tablets := gw.hc.GetHealthyTabletStats(target)
+
+ // temporary hack to enable REPLICA type queries to address both REPLICA tablets and RDONLY tablets
+ // original commit - https://github.com/tinyspeck/vitess/pull/166/commits/2552b4ce25a9fdb41ff07fa69f2ccf485fea83ac
+ // discoverygateway patch - https://github.com/slackhq/vitess/commit/47adb7c8fc720cb4cb7a090530b3e88d310ff6d3
+ if routeReplicaToRdonly && target.TabletType == topodatapb.TabletType_REPLICA {
+ // Create a new target for the same original keyspace/shard, but RDONLY tablet type.
+ rdonlyTarget := &querypb.Target{
+ Keyspace: target.Keyspace,
+ Shard: target.Shard,
+ TabletType: topodatapb.TabletType_RDONLY,
+ }
+ tablets = append(tablets, gw.hc.GetHealthyTabletStats(rdonlyTarget)...)
+ }
+
if len(tablets) == 0 {
// if we have a keyspace event watcher, check if the reason why our primary is not available is that it's currently being resharded
// or if a reparent operation is in progress.
@@ -311,7 +363,27 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target,
err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no healthy tablet available for '%s'", target.String())
break
}
- gw.shuffleTablets(gw.localCell, tablets)
+
+ // Determine whether or not to use the balancer or the standard affinity-based shuffle
+ useBalancer := false
+ if balancerEnabled {
+ if len(balancerKeyspaces) != 0 {
+ for _, keyspace := range balancerKeyspaces {
+ if keyspace == target.Keyspace {
+ useBalancer = true
+ break
+ }
+ }
+ } else {
+ useBalancer = true
+ }
+ }
+
+ if useBalancer {
+ gw.balancer.ShuffleTablets(target, tablets)
+ } else {
+ gw.shuffleTablets(gw.localCell, tablets)
+ }
var th *discovery.TabletHealth
// skip tablets we tried before
@@ -341,7 +413,11 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target,
startTime := time.Now()
var canRetry bool
- canRetry, err = inner(ctx, target, th.Conn)
+ if routeReplicaToRdonly && target.TabletType == topodatapb.TabletType_REPLICA {
+ canRetry, err = inner(ctx, th.Target, th.Conn)
+ } else {
+ canRetry, err = inner(ctx, target, th.Conn)
+ }
gw.updateStats(target, startTime, err)
if canRetry {
invalidTablets[topoproto.TabletAliasString(tabletLastUsed.Alias)] = true
diff --git a/go/vt/vtgate/tabletgateway_flaky_test.go b/go/vt/vtgate/tabletgateway_flaky_test.go
index 34bb65363be..ebad7d4aa90 100644
--- a/go/vt/vtgate/tabletgateway_flaky_test.go
+++ b/go/vt/vtgate/tabletgateway_flaky_test.go
@@ -185,7 +185,14 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) {
hc.Broadcast(primaryTablet)
// set the serving type for the primary tablet false and broadcast it so that the buffering code registers this change
hc.SetServing(primaryTablet, false)
+ // We call the broadcast twice to ensure that the change has been processed by the keyspace event watcher.
+ // The second broadcast call is blocking until the first one has been processed.
hc.Broadcast(primaryTablet)
+ hc.Broadcast(primaryTablet)
+
+ require.Len(t, tg.hc.GetHealthyTabletStats(target), 0, "GetHealthyTabletStats has tablets even though it shouldn't")
+ isNotServing := tg.kev.PrimaryIsNotServing(target)
+ require.True(t, isNotServing)
// add a result to the sandbox connection of the new primary
sbcReplica.SetResults([]*sqltypes.Result{sqlResult1})
diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go
index 5bf0e7d730f..2f225db410b 100644
--- a/go/vt/vtgate/vcursor_impl.go
+++ b/go/vt/vtgate/vcursor_impl.go
@@ -402,6 +402,19 @@ func (vc *vcursorImpl) ExecutePrimitive(ctx context.Context, primitive engine.Pr
return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "upstream shards are not available")
}
+func (vc *vcursorImpl) ExecutePrimitiveStandalone(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) {
+ // clone the vcursorImpl with a new session.
+ newVC := vc.cloneWithAutocommitSession()
+ for try := 0; try < MaxBufferingRetries; try++ {
+ res, err := primitive.TryExecute(ctx, newVC, bindVars, wantfields)
+ if err != nil && vterrors.RootCause(err) == buffer.ShardMissingError {
+ continue
+ }
+ return res, err
+ }
+ return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "upstream shards are not available")
+}
+
func (vc *vcursorImpl) StreamExecutePrimitive(ctx context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
for try := 0; try < MaxBufferingRetries; try++ {
err := primitive.TryStreamExecute(ctx, vc, bindVars, wantfields, callback)
@@ -773,6 +786,21 @@ func (vc *vcursorImpl) SetPlannerVersion(v plancontext.PlannerVersion) {
vc.safeSession.GetOrCreateOptions().PlannerVersion = v
}
+func (vc *vcursorImpl) SetPriority(priority string) {
+ if priority != "" {
+ vc.safeSession.GetOrCreateOptions().Priority = priority
+ } else if vc.safeSession.Options != nil && vc.safeSession.Options.Priority != "" {
+ vc.safeSession.Options.Priority = ""
+ }
+
+}
+
+func (vc *vcursorImpl) SetWorkloadName(workloadName string) {
+ if workloadName != "" {
+ vc.safeSession.GetOrCreateOptions().WorkloadName = workloadName
+ }
+}
+
// SetFoundRows implements the SessionActions interface
func (vc *vcursorImpl) SetFoundRows(foundRows uint64) {
vc.safeSession.FoundRows = foundRows
@@ -849,6 +877,10 @@ func (vc *vcursorImpl) SetCommitOrder(co vtgatepb.CommitOrder) {
vc.safeSession.SetCommitOrder(co)
}
+func (vc *vcursorImpl) InTransaction() bool {
+ return vc.safeSession.InTransaction()
+}
+
// GetDBDDLPluginName implements the VCursor interface
func (vc *vcursorImpl) GetDBDDLPluginName() string {
return dbDDLPlugin
@@ -1017,6 +1049,7 @@ func (vc *vcursorImpl) ReleaseLock(ctx context.Context) error {
func (vc *vcursorImpl) cloneWithAutocommitSession() *vcursorImpl {
safeSession := NewAutocommitSession(vc.safeSession.Session)
+ safeSession.logging = vc.safeSession.logging
return &vcursorImpl{
safeSession: safeSession,
keyspace: vc.keyspace,
diff --git a/go/vt/vtgate/vindexes/consistent_lookup.go b/go/vt/vtgate/vindexes/consistent_lookup.go
index 1cd372906f4..3c2166c0aaf 100644
--- a/go/vt/vtgate/vindexes/consistent_lookup.go
+++ b/go/vt/vtgate/vindexes/consistent_lookup.go
@@ -148,6 +148,10 @@ func (lu *ConsistentLookup) AllowBatch() bool {
return lu.lkp.BatchLookup
}
+func (lu *ConsistentLookup) AutoCommitEnabled() bool {
+ return lu.lkp.Autocommit
+}
+
//====================================================================
// ConsistentLookupUnique defines a vindex that uses a lookup table.
@@ -240,6 +244,10 @@ func (lu *ConsistentLookupUnique) AllowBatch() bool {
return lu.lkp.BatchLookup
}
+func (lu *ConsistentLookupUnique) AutoCommitEnabled() bool {
+ return lu.lkp.Autocommit
+}
+
//====================================================================
// clCommon defines a vindex that uses a lookup table.
diff --git a/go/vt/vtgate/vindexes/lookup.go b/go/vt/vtgate/vindexes/lookup.go
index 09c838ede4e..c2577f17a1b 100644
--- a/go/vt/vtgate/vindexes/lookup.go
+++ b/go/vt/vtgate/vindexes/lookup.go
@@ -57,6 +57,10 @@ func (ln *LookupNonUnique) AllowBatch() bool {
return ln.lkp.BatchLookup
}
+func (ln *LookupNonUnique) AutoCommitEnabled() bool {
+ return ln.lkp.Autocommit
+}
+
// String returns the name of the vindex.
func (ln *LookupNonUnique) String() string {
return ln.name
@@ -225,6 +229,10 @@ func (lu *LookupUnique) AllowBatch() bool {
return lu.lkp.BatchLookup
}
+func (lu *LookupUnique) AutoCommitEnabled() bool {
+ return lu.lkp.Autocommit
+}
+
// NewLookupUnique creates a LookupUnique vindex.
// The supplied map has the following required fields:
//
diff --git a/go/vt/vtgate/vindexes/lookup_hash.go b/go/vt/vtgate/vindexes/lookup_hash.go
index 455f3be09b9..ae838adf343 100644
--- a/go/vt/vtgate/vindexes/lookup_hash.go
+++ b/go/vt/vtgate/vindexes/lookup_hash.go
@@ -171,6 +171,10 @@ func (lh *LookupHash) AllowBatch() bool {
return lh.lkp.BatchLookup
}
+func (lh *LookupHash) AutoCommitEnabled() bool {
+ return lh.lkp.Autocommit
+}
+
// GetCommitOrder implements the LookupPlanable interface
func (lh *LookupHash) GetCommitOrder() vtgatepb.CommitOrder {
return vtgatepb.CommitOrder_NORMAL
@@ -403,6 +407,10 @@ func (lhu *LookupHashUnique) AllowBatch() bool {
return lhu.lkp.BatchLookup
}
+func (lhu *LookupHashUnique) AutoCommitEnabled() bool {
+ return lhu.lkp.Autocommit
+}
+
func (lhu *LookupHashUnique) Query() (selQuery string, arguments []string) {
return lhu.lkp.query()
}
diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go
index 884fd7c99b9..433234b82cb 100644
--- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go
+++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go
@@ -192,6 +192,10 @@ func (lh *LookupUnicodeLooseMD5Hash) AllowBatch() bool {
return lh.lkp.BatchLookup
}
+func (lh *LookupUnicodeLooseMD5Hash) AutoCommitEnabled() bool {
+ return lh.lkp.Autocommit
+}
+
// GetCommitOrder implements the LookupPlanable interface
func (lh *LookupUnicodeLooseMD5Hash) GetCommitOrder() vtgatepb.CommitOrder {
return vtgatepb.CommitOrder_NORMAL
@@ -402,6 +406,10 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) AllowBatch() bool {
return lhu.lkp.BatchLookup
}
+func (lhu *LookupUnicodeLooseMD5HashUnique) AutoCommitEnabled() bool {
+ return lhu.lkp.Autocommit
+}
+
// GetCommitOrder implements the LookupPlanable interface
func (lhu *LookupUnicodeLooseMD5HashUnique) GetCommitOrder() vtgatepb.CommitOrder {
return vtgatepb.CommitOrder_NORMAL
diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go
index dcb96e73a28..700b8e6175c 100644
--- a/go/vt/vtgate/vindexes/vindex.go
+++ b/go/vt/vtgate/vindexes/vindex.go
@@ -143,6 +143,7 @@ type (
MapResult(ids []sqltypes.Value, results []*sqltypes.Result) ([]key.Destination, error)
AllowBatch() bool
GetCommitOrder() vtgatepb.CommitOrder
+ AutoCommitEnabled() bool
}
// LookupBackfill interfaces all lookup vindexes that can backfill rows, such as LookupUnique.
diff --git a/go/vt/vtgate/vstream_manager.go b/go/vt/vtgate/vstream_manager.go
index b976da684e7..aa98f148f44 100644
--- a/go/vt/vtgate/vstream_manager.go
+++ b/go/vt/vtgate/vstream_manager.go
@@ -20,24 +20,28 @@ import (
"context"
"fmt"
"io"
+ "regexp"
"strings"
"sync"
"time"
- "vitess.io/vitess/go/vt/discovery"
- querypb "vitess.io/vitess/go/vt/proto/query"
- "vitess.io/vitess/go/vt/topo"
-
- vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
-
"google.golang.org/protobuf/proto"
+ "vitess.io/vitess/go/stats"
+ "vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/srvtopo"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
+
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/srvtopo"
- "vitess.io/vitess/go/vt/vterrors"
)
// vstreamManager manages vstream requests.
@@ -45,11 +49,21 @@ type vstreamManager struct {
resolver *srvtopo.Resolver
toposerv srvtopo.Server
cell string
+ // allowVstreamCopy, when false, causes vstream copy requests (streams started with no GTID) to fail.
+ // This is temporary until RDONLYs are properly supported for bootstrapping.
+ allowVstreamCopy bool
+
+ vstreamsCreated *stats.CountersWithMultiLabels
+ vstreamsLag *stats.GaugesWithMultiLabels
}
// maxSkewTimeoutSeconds is the maximum allowed skew between two streams when the MinimizeSkew flag is set
const maxSkewTimeoutSeconds = 10 * 60
+// tabletPickerContextTimeout is the timeout for the child context used to select candidate tablets
+// for a vstream
+const tabletPickerContextTimeout = 90 * time.Second
+
// vstream contains the metadata for one VStream request.
type vstream struct {
// mu protects parts of vgtid, the semantics of a send, and journaler.
@@ -101,11 +115,16 @@ type vstream struct {
// the timestamp of the most recent event, keyed by streamId. streamId is of the form .
timestamps map[string]int64
+ // the shard map tracking the copy completion, keyed by streamId. streamId is of the form keyspace/shard.
+ copyCompletedShard map[string]struct{}
+
vsm *vstreamManager
eventCh chan []*binlogdatapb.VEvent
heartbeatInterval uint32
ts *topo.Server
+
+ tabletPickerOptions discovery.TabletPickerOptions
}
type journalEvent struct {
@@ -114,11 +133,22 @@ type journalEvent struct {
done chan struct{}
}
-func newVStreamManager(resolver *srvtopo.Resolver, serv srvtopo.Server, cell string) *vstreamManager {
+func newVStreamManager(resolver *srvtopo.Resolver, serv srvtopo.Server, cell string, allowVstreamCopy bool) *vstreamManager {
+ exporter := servenv.NewExporter(cell, "VStreamManager")
+
return &vstreamManager{
- resolver: resolver,
- toposerv: serv,
- cell: cell,
+ resolver: resolver,
+ toposerv: serv,
+ cell: cell,
+ allowVstreamCopy: allowVstreamCopy,
+ vstreamsCreated: exporter.NewCountersWithMultiLabels(
+ "VStreamsCreated",
+ "Number of vstreams created",
+ []string{"Keyspace", "ShardName", "TabletType"}),
+ vstreamsLag: exporter.NewGaugesWithMultiLabels(
+ "VStreamsLag",
+ "Difference between event current time and the binlog event timestamp",
+ []string{"Keyspace", "ShardName", "TabletType"}),
}
}
@@ -152,6 +182,11 @@ func (vsm *vstreamManager) VStream(ctx context.Context, tabletType topodatapb.Ta
eventCh: make(chan []*binlogdatapb.VEvent),
heartbeatInterval: flags.GetHeartbeatInterval(),
ts: ts,
+ copyCompletedShard: make(map[string]struct{}),
+ tabletPickerOptions: discovery.TabletPickerOptions{
+ CellPreference: flags.GetCellPreference(),
+ TabletOrder: flags.GetTabletOrder(),
+ },
}
return vs.stream(ctx)
}
@@ -175,31 +210,51 @@ func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodat
return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vgtid must have at least one value with a starting position")
}
// To fetch from all keyspaces, the input must contain a single ShardGtid
- // that has an empty keyspace, and the Gtid must be "current". In the
- // future, we'll allow the Gtid to be empty which will also support
- // copying of existing data.
- if len(vgtid.ShardGtids) == 1 && vgtid.ShardGtids[0].Keyspace == "" {
- if vgtid.ShardGtids[0].Gtid != "current" {
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "for an empty keyspace, the Gtid value must be 'current': %v", vgtid)
- }
- keyspaces, err := vsm.toposerv.GetSrvKeyspaceNames(ctx, vsm.cell, false)
- if err != nil {
- return nil, nil, nil, err
- }
- newvgtid := &binlogdatapb.VGtid{}
- for _, keyspace := range keyspaces {
- newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{
- Keyspace: keyspace,
- Gtid: "current",
- })
+ // that has an empty keyspace, and the Gtid must be "current".
+ // Or the input must contain a single ShardGtid that has keyspace wildcards.
+ if len(vgtid.ShardGtids) == 1 {
+ inputKeyspace := vgtid.ShardGtids[0].Keyspace
+ isEmpty := inputKeyspace == ""
+ isRegexp := strings.HasPrefix(inputKeyspace, "/")
+ if isEmpty || isRegexp {
+ newvgtid := &binlogdatapb.VGtid{}
+ keyspaces, err := vsm.toposerv.GetSrvKeyspaceNames(ctx, vsm.cell, false)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ if isEmpty {
+ if vgtid.ShardGtids[0].Gtid != "current" {
+ return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "for an empty keyspace, the Gtid value must be 'current': %v", vgtid)
+ }
+ for _, keyspace := range keyspaces {
+ newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{
+ Keyspace: keyspace,
+ Gtid: "current",
+ })
+ }
+ } else {
+ re, err := regexp.Compile(strings.Trim(inputKeyspace, "/"))
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ for _, keyspace := range keyspaces {
+ if re.MatchString(keyspace) {
+ newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{
+ Keyspace: keyspace,
+ Gtid: vgtid.ShardGtids[0].Gtid,
+ })
+ }
+ }
+ }
+ vgtid = newvgtid
}
- vgtid = newvgtid
}
newvgtid := &binlogdatapb.VGtid{}
for _, sgtid := range vgtid.ShardGtids {
if sgtid.Shard == "" {
- if sgtid.Gtid != "current" {
- return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "if shards are unspecified, the Gtid value must be 'current': %v", vgtid)
+ if sgtid.Gtid != "current" && sgtid.Gtid != "" {
+ return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "if shards are unspecified, the Gtid value must be 'current' or empty; got: %v", vgtid)
}
// TODO(sougou): this should work with the new Migrate workflow
_, _, allShards, err := vsm.resolver.GetKeyspaceShards(ctx, sgtid.Keyspace, tabletType)
@@ -431,6 +486,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
// journalDone is assigned a channel when a journal event is encountered.
// It will be closed when all journal events converge.
var journalDone chan struct{}
+ ignoreTablets := make([]*topodatapb.TabletAlias, 0)
errCount := 0
for {
@@ -448,12 +504,19 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
var eventss [][]*binlogdatapb.VEvent
var err error
cells := vs.getCells()
- tp, err := discovery.NewTabletPicker(vs.ts, cells, sgtid.Keyspace, sgtid.Shard, vs.tabletType.String())
+
+ tp, err := discovery.NewTabletPicker(ctx, vs.ts, cells, vs.vsm.cell, sgtid.Keyspace, sgtid.Shard, vs.tabletType.String(), vs.tabletPickerOptions, ignoreTablets...)
if err != nil {
log.Errorf(err.Error())
return err
}
- tablet, err := tp.PickForStreaming(ctx)
+
+ // Create a child context with a stricter timeout when picking a tablet.
+ // This will prevent hanging in the case no tablets are found.
+ tpCtx, tpCancel := context.WithTimeout(ctx, tabletPickerContextTimeout)
+ defer tpCancel()
+
+ tablet, err := tp.PickForStreaming(tpCtx)
if err != nil {
log.Errorf(err.Error())
return err
@@ -476,18 +539,23 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
go func() {
_ = tabletConn.StreamHealth(ctx, func(shr *querypb.StreamHealthResponse) error {
var err error
- if ctx.Err() != nil {
+ switch {
+ case ctx.Err() != nil:
err = fmt.Errorf("context has ended")
- } else if shr == nil || shr.RealtimeStats == nil || shr.Target == nil {
- err = fmt.Errorf("health check failed")
- } else if vs.tabletType != shr.Target.TabletType {
- err = fmt.Errorf("tablet type has changed from %s to %s, restarting vstream",
- vs.tabletType, shr.Target.TabletType)
- } else if shr.RealtimeStats.HealthError != "" {
+ case shr == nil || shr.RealtimeStats == nil || shr.Target == nil:
+ err = fmt.Errorf("health check failed on %s", topoproto.TabletAliasString(tablet.Alias))
+ case vs.tabletType != shr.Target.TabletType:
+ err = fmt.Errorf("tablet %s type has changed from %s to %s, restarting vstream",
+ topoproto.TabletAliasString(tablet.Alias), vs.tabletType, shr.Target.TabletType)
+ case shr.RealtimeStats.HealthError != "":
err = fmt.Errorf("tablet %s is no longer healthy: %s, restarting vstream",
- tablet.Alias, shr.RealtimeStats.HealthError)
+ topoproto.TabletAliasString(tablet.Alias), shr.RealtimeStats.HealthError)
+ case shr.RealtimeStats.ReplicationLagSeconds > uint32(discovery.GetLowReplicationLag().Seconds()):
+ err = fmt.Errorf("tablet %s has a replication lag of %d seconds which is beyond the value provided in --discovery_low_replication_lag of %s so the tablet is no longer considered healthy, restarting vstream",
+ topoproto.TabletAliasString(tablet.Alias), shr.RealtimeStats.ReplicationLagSeconds, discovery.GetLowReplicationLag())
}
if err != nil {
+ log.Warningf("Tablet state changed: %s, attempting to restart", err)
errCh <- err
return err
}
@@ -503,15 +571,27 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
Filter: vs.filter,
TableLastPKs: sgtid.TablePKs,
}
+ var vstreamCreatedOnce sync.Once
+
+ if !vs.vsm.allowVstreamCopy && (sgtid.Gtid == "" || len(sgtid.TablePKs) > 0) {
+ // We are attempting a vstream copy, but it is not allowed (temporary until we can properly support RDONLYs for bootstrapping).
+ return vterrors.NewErrorf(vtrpc.Code_UNIMPLEMENTED, vterrors.NotSupportedYet, "vstream copy is not currently supported")
+ }
+
err = tabletConn.VStream(ctx, req, func(events []*binlogdatapb.VEvent) error {
// We received a valid event. Reset error count.
errCount = 0
+ labels := []string{sgtid.Keyspace, sgtid.Shard, req.Target.TabletType.String()}
+
+ vstreamCreatedOnce.Do(func() {
+ vs.vsm.vstreamsCreated.Add(labels, 1)
+ })
+
select {
case <-ctx.Done():
return ctx.Err()
case streamErr := <-errCh:
- log.Warningf("Tablet state changed: %s, attempting to restart", streamErr)
return vterrors.New(vtrpcpb.Code_UNAVAILABLE, streamErr.Error())
case <-journalDone:
// Unreachable.
@@ -544,6 +624,22 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
return err
}
+ if err := vs.sendAll(ctx, sgtid, eventss); err != nil {
+ return err
+ }
+ eventss = nil
+ sendevents = nil
+ case binlogdatapb.VEventType_COPY_COMPLETED:
+ sendevents = append(sendevents, event)
+ if fullyCopied, doneEvent := vs.isCopyFullyCompleted(ctx, sgtid, event); fullyCopied {
+ sendevents = append(sendevents, doneEvent)
+ }
+ eventss = append(eventss, sendevents)
+
+ if err := vs.alignStreams(ctx, event, sgtid.Keyspace, sgtid.Shard); err != nil {
+ return err
+ }
+
if err := vs.sendAll(ctx, sgtid, eventss); err != nil {
return err
}
@@ -586,6 +682,9 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
default:
sendevents = append(sendevents, event)
}
+ lag := event.CurrentTime/1e9 - event.Timestamp
+ vs.vsm.vstreamsLag.Set(labels, lag)
+
}
if len(sendevents) != 0 {
eventss = append(eventss, sendevents)
@@ -602,11 +701,18 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
// Unreachable.
err = vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "vstream ended unexpectedly")
}
- if vterrors.Code(err) != vtrpcpb.Code_FAILED_PRECONDITION && vterrors.Code(err) != vtrpcpb.Code_UNAVAILABLE {
+
+ retry, ignoreTablet := vs.shouldRetry(err)
+ if !retry {
log.Errorf("vstream for %s/%s error: %v", sgtid.Keyspace, sgtid.Shard, err)
return err
}
+ if ignoreTablet {
+ ignoreTablets = append(ignoreTablets, tablet.GetAlias())
+ }
+
errCount++
+ // Retry, at most, 3 times if the error can be retried.
if errCount >= 3 {
log.Errorf("vstream for %s/%s had three consecutive failures: %v", sgtid.Keyspace, sgtid.Shard, err)
return err
@@ -615,6 +721,31 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha
}
}
+// shouldRetry determines whether we should exit immediately or retry the vstream.
+// The first return value determines if the error can be retried, while the second
+// indicates whether the tablet with which the error occurred should be omitted
+// from the candidate list of tablets to choose from on the retry.
+//
+// An error should be retried if it is expected to be transient.
+// A tablet should be ignored upon retry if it's likely another tablet will not
+// produce the same error.
+func (vs *vstream) shouldRetry(err error) (bool, bool) {
+ errCode := vterrors.Code(err)
+
+ if errCode == vtrpcpb.Code_FAILED_PRECONDITION || errCode == vtrpcpb.Code_UNAVAILABLE {
+ return true, false
+ }
+
+ // If there is a GTIDSet Mismatch on the tablet, omit it from the candidate
+ // list in the TabletPicker on retry.
+ if (errCode == vtrpcpb.Code_INVALID_ARGUMENT && strings.Contains(err.Error(), "GTIDSet Mismatch")) ||
+ errCode == vtrpc.Code_NOT_FOUND {
+ return true, true
+ }
+
+ return false, false
+}
+
// sendAll sends a group of events together while holding the lock.
func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, eventss [][]*binlogdatapb.VEvent) error {
vs.mu.Lock()
@@ -676,6 +807,25 @@ func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, e
return nil
}
+// isCopyFullyCompleted returns true if all streams have received a copy_completed event.
+// If true, it will also return a new copy_completed event that needs to be sent.
+// This new event represents the completion of all the copy operations.
+func (vs *vstream) isCopyFullyCompleted(ctx context.Context, sgtid *binlogdatapb.ShardGtid, event *binlogdatapb.VEvent) (bool, *binlogdatapb.VEvent) {
+ vs.mu.Lock()
+ defer vs.mu.Unlock()
+
+ vs.copyCompletedShard[fmt.Sprintf("%s/%s", event.Keyspace, event.Shard)] = struct{}{}
+
+ for _, shard := range vs.vgtid.ShardGtids {
+ if _, ok := vs.copyCompletedShard[fmt.Sprintf("%s/%s", shard.Keyspace, shard.Shard)]; !ok {
+ return false, nil
+ }
+ }
+ return true, &binlogdatapb.VEvent{
+ Type: binlogdatapb.VEventType_COPY_COMPLETED,
+ }
+}
+
func (vs *vstream) getError() error {
vs.errMu.Lock()
defer vs.errMu.Unlock()
diff --git a/go/vt/vtgate/vstream_manager_test.go b/go/vt/vtgate/vstream_manager_test.go
index d71008bc6ae..3c7aea63bc2 100644
--- a/go/vt/vtgate/vstream_manager_test.go
+++ b/go/vt/vtgate/vstream_manager_test.go
@@ -24,26 +24,27 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/sync2"
-
- "vitess.io/vitess/go/vt/topo"
-
- vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/vterrors"
-
- "vitess.io/vitess/go/stats"
- "vitess.io/vitess/go/vt/vttablet/sandboxconn"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
+ "vitess.io/vitess/go/stats"
+ "vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/discovery"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/proto/binlogdata"
+ "vitess.io/vitess/go/vt/srvtopo"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vttablet/sandboxconn"
+
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- "vitess.io/vitess/go/vt/srvtopo"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
var mu sync.Mutex
@@ -89,7 +90,7 @@ func TestVStreamSkew(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"})
- vsm := newTestVStreamManager(hc, st, cell)
+ vsm := newTestVStreamManager(hc, st, cell, true)
vgtid := &binlogdatapb.VGtid{ShardGtids: []*binlogdatapb.ShardGtid{}}
want := int64(0)
var sbc0, sbc1 *sandboxconn.SandboxConn
@@ -135,7 +136,7 @@ func TestVStreamEvents(t *testing.T) {
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20"})
- vsm := newTestVStreamManager(hc, st, cell)
+ vsm := newTestVStreamManager(hc, st, cell, true)
sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
@@ -212,7 +213,7 @@ func TestVStreamChunks(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"})
- vsm := newTestVStreamManager(hc, st, cell)
+ vsm := newTestVStreamManager(hc, st, cell, true)
sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
sbc1 := hc.AddTestTablet("aa", "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil)
@@ -282,7 +283,7 @@ func TestVStreamMulti(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"})
- vsm := newTestVStreamManager(hc, st, "aa")
+ vsm := newTestVStreamManager(hc, st, "aa", true)
sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil)
@@ -336,47 +337,192 @@ func TestVStreamMulti(t *testing.T) {
}
}
-func TestVStreamRetry(t *testing.T) {
+func TestVStreamsCreatedAndLagMetrics(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
-
cell := "aa"
ks := "TestVStream"
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
-
- st := getSandboxTopo(ctx, cell, ks, []string{"-20"})
- vsm := newTestVStreamManager(hc, st, "aa")
+ st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"})
+ vsm := newTestVStreamManager(hc, st, cell, true)
+ vsm.vstreamsCreated.ResetAll()
+ vsm.vstreamsLag.ResetAll()
sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
- commit := []*binlogdatapb.VEvent{
- {Type: binlogdatapb.VEventType_COMMIT},
+ sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil)
+ addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet())
+
+ send0 := []*binlogdatapb.VEvent{
+ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"},
+ {Type: binlogdatapb.VEventType_COMMIT, Timestamp: 10, CurrentTime: 15 * 1e9},
}
- sbc0.AddVStreamEvents(commit, nil)
- sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "aa"))
- sbc0.AddVStreamEvents(commit, nil)
- sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "bb"))
- sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cc"))
- sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "final error"))
- var count sync2.AtomicInt32
- count.Set(0)
+ sbc0.AddVStreamEvents(send0, nil)
+
+ send1 := []*binlogdatapb.VEvent{
+ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid02"},
+ {Type: binlogdatapb.VEventType_COMMIT, Timestamp: 10, CurrentTime: 17 * 1e9},
+ }
+ sbc1.AddVStreamEvents(send1, nil)
+
vgtid := &binlogdatapb.VGtid{
ShardGtids: []*binlogdatapb.ShardGtid{{
Keyspace: ks,
Shard: "-20",
Gtid: "pos",
+ }, {
+ Keyspace: ks,
+ Shard: "20-40",
+ Gtid: "pos",
}},
}
- err := vsm.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error {
- count.Add(1)
- return nil
- })
- wantErr := "final error"
- if err == nil || !strings.Contains(err.Error(), wantErr) {
- t.Errorf("vstream end: %v, must contain %v", err.Error(), wantErr)
+ ch := startVStream(ctx, t, vsm, vgtid, nil)
+ <-ch
+ <-ch
+ wantVStreamsCreated := make(map[string]int64)
+ wantVStreamsCreated["TestVStream.-20.PRIMARY"] = 1
+ wantVStreamsCreated["TestVStream.20-40.PRIMARY"] = 1
+ assert.Equal(t, wantVStreamsCreated, vsm.vstreamsCreated.Counts(), "vstreamsCreated matches")
+
+ wantVStreamsLag := make(map[string]int64)
+ wantVStreamsLag["TestVStream.-20.PRIMARY"] = 5
+ wantVStreamsLag["TestVStream.20-40.PRIMARY"] = 7
+ assert.Equal(t, wantVStreamsLag, vsm.vstreamsLag.Counts(), "vstreamsLag matches")
+}
+
+func TestVStreamRetriableErrors(t *testing.T) {
+ type testCase struct {
+ name string
+ code vtrpcpb.Code
+ msg string
+ shouldRetry bool
+ ignoreTablet bool
+ }
+
+ tcases := []testCase{
+ {
+ name: "failed precondition",
+ code: vtrpcpb.Code_FAILED_PRECONDITION,
+ msg: "",
+ shouldRetry: true,
+ ignoreTablet: false,
+ },
+ {
+ name: "gtid mismatch",
+ code: vtrpcpb.Code_INVALID_ARGUMENT,
+ msg: "GTIDSet Mismatch aa",
+ shouldRetry: true,
+ ignoreTablet: true,
+ },
+ {
+ name: "unavailable",
+ code: vtrpcpb.Code_UNAVAILABLE,
+ msg: "",
+ shouldRetry: true,
+ ignoreTablet: false,
+ },
+ {
+ name: "should not retry",
+ code: vtrpcpb.Code_INVALID_ARGUMENT,
+ msg: "final error",
+ shouldRetry: false,
+ ignoreTablet: false,
+ },
+ {
+ name: "not found",
+ code: vtrpcpb.Code_NOT_FOUND,
+ msg: "",
+ shouldRetry: true,
+ ignoreTablet: true,
+ },
+ }
+
+ commit := []*binlogdatapb.VEvent{
+ {Type: binlogdatapb.VEventType_COMMIT},
+ }
+
+ want := &binlogdatapb.VStreamResponse{Events: commit}
+
+ for _, tcase := range tcases {
+ t.Run(tcase.name, func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // aa will be the local cell for this test, but that tablet will have a vstream error.
+ cells := []string{"aa", "ab"}
+
+ ks := "TestVStream"
+ _ = createSandbox(ks)
+ hc := discovery.NewFakeHealthCheck(nil)
+
+ st := getSandboxTopoMultiCell(cells, ks, []string{"-20"})
+
+ sbc0 := hc.AddTestTablet(cells[0], "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_REPLICA, true, 1, nil)
+ sbc1 := hc.AddTestTablet(cells[1], "1.1.1.1", 1002, ks, "-20", topodatapb.TabletType_REPLICA, true, 1, nil)
+
+ addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
+ addTabletToSandboxTopo(t, st, ks, "-20", sbc1.Tablet())
+
+ vsm := newTestVStreamManager(hc, st, cells[0], true)
+
+ // Always have the local cell tablet error so it's ignored on retry and we pick the other one
+ // if the error requires ignoring the tablet on retry.
+ sbc0.AddVStreamEvents(nil, vterrors.Errorf(tcase.code, tcase.msg))
+
+ if tcase.ignoreTablet {
+ sbc1.AddVStreamEvents(commit, nil)
+ } else {
+ sbc0.AddVStreamEvents(commit, nil)
+ }
+
+ vgtid := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{{
+ Keyspace: ks,
+ Shard: "-20",
+ Gtid: "pos",
+ }},
+ }
+
+ ch := make(chan *binlogdatapb.VStreamResponse)
+ done := make(chan struct{})
+ go func() {
+ err := vsm.VStream(ctx, topodatapb.TabletType_REPLICA, vgtid, nil, &vtgatepb.VStreamFlags{Cells: strings.Join(cells, ",")}, func(events []*binlogdatapb.VEvent) error {
+ ch <- &binlogdatapb.VStreamResponse{Events: events}
+ return nil
+ })
+ wantErr := "context canceled"
+
+ if !tcase.shouldRetry {
+ wantErr = tcase.msg
+ }
+
+ if err == nil || !strings.Contains(err.Error(), wantErr) {
+ t.Errorf("vstream end: %v, must contain %v", err.Error(), wantErr)
+ }
+ close(done)
+ }()
+
+ Loop:
+ for {
+ if tcase.shouldRetry {
+ select {
+ case event := <-ch:
+ got := event
+ if !proto.Equal(got, want) {
+ t.Errorf("got different vstream event than expected")
+ }
+ cancel()
+ case <-done:
+ // The goroutine has completed, so break out of the loop
+ break Loop
+ }
+ } else {
+ <-done
+ break Loop
+ }
+ }
+ })
}
- time.Sleep(100 * time.Millisecond) // wait for goroutine within VStream to finish
- assert.Equal(t, int32(2), count.Get())
}
func TestVStreamShouldNotSendSourceHeartbeats(t *testing.T) {
@@ -387,7 +533,7 @@ func TestVStreamShouldNotSendSourceHeartbeats(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20"})
- vsm := newTestVStreamManager(hc, st, cell)
+ vsm := newTestVStreamManager(hc, st, cell, true)
sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
@@ -437,7 +583,7 @@ func TestVStreamJournalOneToMany(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"})
- vsm := newTestVStreamManager(hc, st, "aa")
+ vsm := newTestVStreamManager(hc, st, "aa", true)
sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil)
@@ -550,7 +696,7 @@ func TestVStreamJournalManyToOne(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"})
- vsm := newTestVStreamManager(hc, st, cell)
+ vsm := newTestVStreamManager(hc, st, cell, true)
sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil)
@@ -667,7 +813,7 @@ func TestVStreamJournalNoMatch(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20"})
- vsm := newTestVStreamManager(hc, st, "aa")
+ vsm := newTestVStreamManager(hc, st, "aa", true)
sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
@@ -796,7 +942,7 @@ func TestVStreamJournalPartialMatch(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"})
- vsm := newTestVStreamManager(hc, st, "aa")
+ vsm := newTestVStreamManager(hc, st, "aa", true)
sbc1 := hc.AddTestTablet("aa", "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet())
sbc2 := hc.AddTestTablet("aa", "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
@@ -876,7 +1022,7 @@ func TestResolveVStreamParams(t *testing.T) {
name := "TestVStream"
_ = createSandbox(name)
hc := discovery.NewFakeHealthCheck(nil)
- vsm := newTestVStreamManager(hc, newSandboxForCells([]string{"aa"}), "aa")
+ vsm := newTestVStreamManager(hc, newSandboxForCells([]string{"aa"}), "aa", true)
testcases := []struct {
input *binlogdatapb.VGtid
output *binlogdatapb.VGtid
@@ -893,9 +1039,44 @@ func TestResolveVStreamParams(t *testing.T) {
input: &binlogdatapb.VGtid{
ShardGtids: []*binlogdatapb.ShardGtid{{
Keyspace: "TestVStream",
+ Gtid: "other",
+ }},
+ },
+ err: "if shards are unspecified, the Gtid value must be 'current' or empty",
+ }, {
+ // Verify that the function maps an input that omits the shard to a list of all shards in the topology.
+ input: &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{{
+ Keyspace: "TestVStream",
+ }},
+ },
+ output: &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{{
+ Keyspace: "TestVStream",
+ Shard: "-20",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "20-40",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "40-60",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "60-80",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "80-a0",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "a0-c0",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "c0-e0",
+ }, {
+ Keyspace: "TestVStream",
+ Shard: "e0-",
}},
},
- err: "if shards are unspecified, the Gtid value must be 'current'",
}, {
input: &binlogdatapb.VGtid{
ShardGtids: []*binlogdatapb.ShardGtid{{
@@ -987,17 +1168,49 @@ func TestResolveVStreamParams(t *testing.T) {
assert.Equal(t, wantFilter, filter, tcase.input)
require.False(t, flags.MinimizeSkew)
}
- // Special-case: empty keyspace because output is too big.
- input := &binlogdatapb.VGtid{
- ShardGtids: []*binlogdatapb.ShardGtid{{
- Gtid: "current",
- }},
+
+ // Special case: an empty keyspace, or a keyspace containing wildcards, is handled separately because the expected output is too big to enumerate inline.
+ // Verify that the function resolves input for multiple keyspaces into a list of all corresponding shards.
+ // Ensure that the number of shards returned is greater than the number of shards in the single keyspace named 'TestVStream'.
+ specialCases := []struct {
+ input *binlogdatapb.ShardGtid
+ }{
+ {
+ input: &binlogdatapb.ShardGtid{
+ Gtid: "current",
+ },
+ },
+ {
+ input: &binlogdatapb.ShardGtid{
+ Keyspace: "/.*",
+ },
+ },
+ {
+ input: &binlogdatapb.ShardGtid{
+ Keyspace: "/.*",
+ Gtid: "current",
+ },
+ },
+ {
+ input: &binlogdatapb.ShardGtid{
+ Keyspace: "/Test.*",
+ },
+ },
}
- vgtid, _, _, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, input, nil, nil)
- require.NoError(t, err, input)
- if got, want := len(vgtid.ShardGtids), 8; want >= got {
- t.Errorf("len(vgtid.ShardGtids): %v, must be >%d", got, want)
+ for _, tcase := range specialCases {
+ input := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{tcase.input},
+ }
+ vgtid, _, _, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, input, nil, nil)
+ require.NoError(t, err, tcase.input)
+ if got, expectTestVStreamShardNumber := len(vgtid.ShardGtids), 8; expectTestVStreamShardNumber >= got {
+ t.Errorf("len(vgtid.ShardGtids): %v, must be >%d", got, expectTestVStreamShardNumber)
+ }
+ for _, s := range vgtid.ShardGtids {
+ require.Equal(t, tcase.input.Gtid, s.Gtid)
+ }
}
+
for _, minimizeSkew := range []bool{true, false} {
t.Run(fmt.Sprintf("resolveParams MinimizeSkew %t", minimizeSkew), func(t *testing.T) {
flags := &vtgatepb.VStreamFlags{MinimizeSkew: minimizeSkew}
@@ -1022,7 +1235,7 @@ func TestVStreamIdleHeartbeat(t *testing.T) {
_ = createSandbox(ks)
hc := discovery.NewFakeHealthCheck(nil)
st := getSandboxTopo(ctx, cell, ks, []string{"-20"})
- vsm := newTestVStreamManager(hc, st, cell)
+ vsm := newTestVStreamManager(hc, st, cell, true)
sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
vgtid := &binlogdatapb.VGtid{
@@ -1071,10 +1284,178 @@ func TestVStreamIdleHeartbeat(t *testing.T) {
}
}
-func newTestVStreamManager(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager {
+func TestVstreamCopy(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ cell := "aa"
+ ks := "TestVStreamCopy"
+ _ = createSandbox(ks)
+ hc := discovery.NewFakeHealthCheck(nil)
+
+ st := getSandboxTopo(ctx, cell, ks, []string{"-20"})
+ sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil)
+ addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet())
+ commit := []*binlogdatapb.VEvent{
+ {Type: binlogdatapb.VEventType_COMMIT},
+ }
+ sbc0.AddVStreamEvents(commit, nil)
+ sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "aa"))
+ sbc0.AddVStreamEvents(commit, nil)
+ sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "bb"))
+ sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cc"))
+ sbc0.AddVStreamEvents(nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "final error"))
+ var count sync2.AtomicInt32
+ count.Set(0)
+ // An empty GTID means there is no start position, i.e. bootstrapping / vstream copy.
+ vgtid := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{{
+ Keyspace: ks,
+ Shard: "-20",
+ Gtid: "",
+ }},
+ }
+
+ // allowVstreamCopy = false
+ vsm := newTestVStreamManager(hc, st, "aa", false)
+ err := vsm.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error {
+ count.Add(1)
+ return nil
+ })
+ require.Error(t, err)
+ require.Equal(t, "vstream copy is not currently supported", err.Error())
+
+ // allowVstreamCopy = true
+ vsm2 := newTestVStreamManager(hc, st, "aa", true)
+ err = vsm2.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, nil, &vtgatepb.VStreamFlags{}, func(events []*binlogdatapb.VEvent) error {
+ count.Add(1)
+ return nil
+ })
+ require.Equal(t, "target: TestVStreamCopy.-20.primary: final error", err.Error())
+}
+
+// TestVStreamManagerHealthCheckResponseHandling tests the handling of healthcheck responses by
+// the vstream manager to confirm that we are correctly restarting the vstream when we should.
+func TestVStreamManagerHealthCheckResponseHandling(t *testing.T) {
+ // Capture the vstream warning log. Otherwise we need to re-implement the vstream error
+ // handling in SandboxConn's implementation and then we're not actually testing the
+ // production code.
+ logger := logutil.NewMemoryLogger()
+ log.Warningf = logger.Warningf
+
+ cell := "aa"
+ ks := "TestVStream"
+ shard := "0"
+ tabletType := topodatapb.TabletType_REPLICA
+ _ = createSandbox(ks)
+ hc := discovery.NewFakeHealthCheck(nil)
+ st := getSandboxTopo(ctx, cell, ks, []string{shard})
+ vsm := newTestVStreamManager(hc, st, cell, true)
+ vgtid := &binlogdatapb.VGtid{
+ ShardGtids: []*binlogdatapb.ShardGtid{{
+ Keyspace: ks,
+ Shard: shard,
+ }},
+ }
+ source := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, shard, tabletType, true, 0, nil)
+ tabletAlias := topoproto.TabletAliasString(source.Tablet().Alias)
+ addTabletToSandboxTopo(t, st, ks, shard, source.Tablet())
+ target := &querypb.Target{
+ Cell: cell,
+ Keyspace: ks,
+ Shard: shard,
+ TabletType: tabletType,
+ }
+ highLag := uint32(discovery.GetLowReplicationLag().Seconds()) + 1
+
+ type testcase struct {
+ name string
+ hcRes *querypb.StreamHealthResponse
+ wantErr string
+ }
+ testcases := []testcase{
+ {
+ name: "all healthy", // Will hit the context timeout
+ },
+ {
+ name: "failure",
+ hcRes: &querypb.StreamHealthResponse{
+ TabletAlias: source.Tablet().Alias,
+ Target: nil, // This is seen as a healthcheck stream failure
+ },
+ wantErr: fmt.Sprintf("health check failed on %s", tabletAlias),
+ },
+ {
+ name: "tablet type changed",
+ hcRes: &querypb.StreamHealthResponse{
+ TabletAlias: source.Tablet().Alias,
+ Target: &querypb.Target{
+ Cell: cell,
+ Keyspace: ks,
+ Shard: shard,
+ TabletType: topodatapb.TabletType_PRIMARY,
+ },
+ RealtimeStats: &querypb.RealtimeStats{},
+ },
+ wantErr: fmt.Sprintf("tablet %s type has changed from %s to %s",
+ tabletAlias, tabletType, topodatapb.TabletType_PRIMARY.String()),
+ },
+ {
+ name: "unhealthy",
+ hcRes: &querypb.StreamHealthResponse{
+ TabletAlias: source.Tablet().Alias,
+ Target: target,
+ RealtimeStats: &querypb.RealtimeStats{
+ HealthError: "unhealthy",
+ },
+ },
+ wantErr: fmt.Sprintf("tablet %s is no longer healthy", tabletAlias),
+ },
+ {
+ name: "replication lag too high",
+ hcRes: &querypb.StreamHealthResponse{
+ TabletAlias: source.Tablet().Alias,
+ Target: target,
+ RealtimeStats: &querypb.RealtimeStats{
+ ReplicationLagSeconds: highLag,
+ },
+ },
+ wantErr: fmt.Sprintf("%s has a replication lag of %d seconds which is beyond the value provided",
+ tabletAlias, highLag),
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ done := make(chan struct{})
+ go func() {
+ sctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ defer cancel()
+ defer close(done)
+ // SandboxConn's VStream implementation always waits for the context to time out.
+ err := vsm.VStream(sctx, tabletType, vgtid, nil, nil, func(events []*binlogdatapb.VEvent) error {
+ require.Fail(t, "unexpected event", "Received unexpected events: %v", events)
+ return nil
+ })
+ if tc.wantErr != "" { // Otherwise we simply expect the context to timeout
+ if !strings.Contains(logger.String(), tc.wantErr) {
+ require.Fail(t, "unexpected vstream error", "vstream ended with error: %v, which did not contain: %s", err, tc.wantErr)
+ }
+ }
+ }()
+ if tc.wantErr != "" {
+ source.SetStreamHealthResponse(tc.hcRes)
+ }
+ <-done
+ logger.Clear()
+ })
+ }
+}
+
+func newTestVStreamManager(hc discovery.HealthCheck, serv srvtopo.Server, cell string, allowVstreamCopy bool) *vstreamManager {
gw := NewTabletGateway(context.Background(), hc, serv, cell)
srvResolver := srvtopo.NewResolver(serv, gw, cell)
- return newVStreamManager(srvResolver, serv, cell)
+ return newVStreamManager(srvResolver, serv, cell, allowVstreamCopy)
}
func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid *binlogdatapb.VGtid, flags *vtgatepb.VStreamFlags) <-chan *binlogdatapb.VStreamResponse {
@@ -1094,7 +1475,8 @@ func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid
func verifyEvents(t *testing.T, ch <-chan *binlogdatapb.VStreamResponse, wants ...*binlogdatapb.VStreamResponse) {
t.Helper()
for i, want := range wants {
- got := <-ch
+ val := <-ch
+ got := proto.Clone(val).(*binlogdatapb.VStreamResponse)
require.NotNil(t, got)
for _, event := range got.Events {
event.Timestamp = 0
@@ -1143,6 +1525,22 @@ func getSandboxTopo(ctx context.Context, cell string, keyspace string, shards []
return st
}
+func getSandboxTopoMultiCell(cells []string, keyspace string, shards []string) *sandboxTopo {
+ st := newSandboxForCells(cells)
+ ts := st.topoServer
+
+ for _, cell := range cells {
+ ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{})
+ }
+
+ ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{})
+
+ for _, shard := range shards {
+ ts.CreateShard(ctx, keyspace, shard)
+ }
+ return st
+}
+
func addTabletToSandboxTopo(t *testing.T, st *sandboxTopo, ks, shard string, tablet *topodatapb.Tablet) {
_, err := st.topoServer.UpdateShardFields(ctx, ks, shard, func(si *topo.ShardInfo) error {
si.PrimaryAlias = tablet.Alias
diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go
index 8d969f4ab91..87f5c696ae5 100644
--- a/go/vt/vtgate/vtgate.go
+++ b/go/vt/vtgate/vtgate.go
@@ -20,9 +20,11 @@ package vtgate
import (
"context"
+ "errors"
"fmt"
"net/http"
"os"
+ "regexp"
"strings"
"time"
@@ -72,6 +74,7 @@ var (
warnPayloadSize int
noScatter bool
+ noVstreamCopy bool
enableShardRouting bool
// TODO(deepthi): change these two vars to unexported and move to healthcheck.go when LegacyHealthcheck is removed
@@ -114,6 +117,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.StringVar(&defaultDDLStrategy, "ddl_strategy", defaultDDLStrategy, "Set default strategy for DDL statements. Override with @@ddl_strategy session variable")
fs.StringVar(&dbDDLPlugin, "dbddl_plugin", dbDDLPlugin, "controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service")
fs.BoolVar(&noScatter, "no_scatter", noScatter, "when set to true, the planner will fail instead of producing a plan that includes scatter queries")
+ fs.BoolVar(&noVstreamCopy, "no_vstream_copy", noVstreamCopy, "when set to true, vstream copy will not be allowed - temporary until we can properly support RDONLY for this")
fs.BoolVar(&enableShardRouting, "enable-partial-keyspace-migration", enableShardRouting, "(Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false)")
fs.DurationVar(&healthCheckRetryDelay, "healthcheck_retry_delay", healthCheckRetryDelay, "health check retry delay")
fs.DurationVar(&healthCheckTimeout, "healthcheck_timeout", healthCheckTimeout, "the health check timeout period")
@@ -180,9 +184,10 @@ type VTGate struct {
// stats objects.
// TODO(sougou): This needs to be cleaned up. There
// are global vars that depend on this member var.
- timings *stats.MultiTimings
- rowsReturned *stats.CountersWithMultiLabels
- rowsAffected *stats.CountersWithMultiLabels
+ timings *stats.MultiTimings
+ rowsReturned *stats.CountersWithMultiLabels
+ rowsAffected *stats.CountersWithMultiLabels
+ queryTextCharsProcessed *stats.CountersWithMultiLabels
// the throttled loggers for all errors, one per API entry
logExecute *logutil.ThrottledLogger
@@ -244,7 +249,7 @@ func Init(
sc := NewScatterConn("VttabletCall", tc, gw)
srvResolver := srvtopo.NewResolver(serv, gw, cell)
resolver := NewResolver(srvResolver, serv, cell, sc)
- vsm := newVStreamManager(srvResolver, serv, cell)
+ vsm := newVStreamManager(srvResolver, serv, cell, !noVstreamCopy)
var si SchemaInfo // default nil
var st *vtschema.Tracker
@@ -272,6 +277,7 @@ func Init(
si,
noScatter,
pv,
+ noVstreamCopy,
)
// connect the schema tracker with the vschema manager
@@ -299,6 +305,10 @@ func Init(
"VtgateApiRowsAffected",
"Rows affected by a write (DML) operation through the VTgate API",
[]string{"Operation", "Keyspace", "DbType"}),
+ queryTextCharsProcessed: stats.NewCountersWithMultiLabels(
+ "VtgateQueryTextCharactersProcessed",
+ "Query text characters processed through the VTGate API",
+ []string{"Operation", "Keyspace", "DbType"}),
logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second),
logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second),
@@ -332,6 +342,7 @@ func Init(
})
rpcVTGate.registerDebugHealthHandler()
rpcVTGate.registerDebugEnvHandler()
+ rpcVTGate.registerDebugBalancerHandler()
err := initQueryLogger(rpcVTGate)
if err != nil {
log.Fatalf("error initializing query logger: %v", err)
@@ -400,6 +411,12 @@ func (vtg *VTGate) registerDebugHealthHandler() {
})
}
+func (vtg *VTGate) registerDebugBalancerHandler() {
+ http.HandleFunc("/debug/balancer", func(w http.ResponseWriter, r *http.Request) {
+ vtg.Gateway().DebugBalancerHandler(w, r)
+ })
+}
+
// IsHealthy returns nil if server is healthy.
// Otherwise, it returns an error indicating the reason.
func (vtg *VTGate) IsHealthy() error {
@@ -428,6 +445,7 @@ func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql s
if err == nil {
vtg.rowsReturned.Add(statsKey, int64(len(qr.Rows)))
vtg.rowsAffected.Add(statsKey, int64(qr.RowsAffected))
+ vtg.queryTextCharsProcessed.Add(statsKey, int64(len(sql)))
return session, qr, nil
}
@@ -588,6 +606,12 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any
ec.String(),
}
+ if terseErrors {
+ regexpBv := regexp.MustCompile(`BindVars: \{.*\}`)
+ str := regexpBv.ReplaceAllString(err.Error(), "BindVars: {REDACTED}")
+ err = errors.New(str)
+ }
+
// Traverse the request structure and truncate any long values
request = truncateErrorStrings(request)
diff --git a/go/vt/vtgr/config/vtgr_config.go b/go/vt/vtgr/config/vtgr_config.go
index 3f66e0dcfc1..4b4063f6460 100644
--- a/go/vt/vtgr/config/vtgr_config.go
+++ b/go/vt/vtgr/config/vtgr_config.go
@@ -104,8 +104,8 @@ type Configuration struct {
MySQLTopologyUseMutualTLS bool // Turn on TLS authentication with the Topology MySQL instances
MySQLTopologyUseMixedTLS bool // Mixed TLS and non-TLS authentication with the Topology MySQL instances
TLSCacheTTLFactor uint // Factor of InstancePollSeconds that we set as TLS info cache expiry
- BackendDB string // EXPERIMENTAL: type of backend db; either "mysql" or "sqlite3"
- SQLite3DataFile string // when BackendDB == "sqlite3", full path to sqlite3 datafile
+ BackendDB string // EXPERIMENTAL: type of backend db; either "mysql" or "sqlite"
+ SQLite3DataFile string // when BackendDB == "sqlite", full path to sqlite3 datafile
SkipOrchestratorDatabaseUpdate bool // When true, do not check backend database schema nor attempt to update it. Useful when you may be running multiple versions of orchestrator, and you only wish certain boxes to dictate the db structure (or else any time a different orchestrator version runs it will rebuild database schema)
PanicIfDifferentDatabaseDeploy bool // When true, and this process finds the orchestrator backend DB was provisioned by a different version, panic
RaftEnabled bool // When true, setup orchestrator in a raft consensus layout. When false (default) all Raft* variables are ignored
@@ -477,7 +477,7 @@ func (config *Configuration) postReadAdjustments() error {
}
if config.IsSQLite() && config.SQLite3DataFile == "" {
- return fmt.Errorf("SQLite3DataFile must be set when BackendDB is sqlite3")
+ return fmt.Errorf("SQLite3DataFile must be set when BackendDB is sqlite")
}
if config.RaftEnabled && config.RaftDataDir == "" {
return fmt.Errorf("RaftDataDir must be defined since raft is enabled (RaftEnabled)")
diff --git a/go/vt/vtgr/db/db.go b/go/vt/vtgr/db/db.go
index a8736596b07..f9a0ab2b478 100644
--- a/go/vt/vtgr/db/db.go
+++ b/go/vt/vtgr/db/db.go
@@ -27,9 +27,9 @@ import (
"sync"
"time"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtgr/config"
- "vitess.io/vitess/go/vt/vtgr/external/golib/sqlutils"
)
var (
diff --git a/go/vt/vtgr/db/mysql.go b/go/vt/vtgr/db/mysql.go
index 1b6222d3223..8c3787c9187 100644
--- a/go/vt/vtgr/db/mysql.go
+++ b/go/vt/vtgr/db/mysql.go
@@ -26,11 +26,12 @@ import (
gouuid "github.com/google/uuid"
"github.com/spf13/pflag"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtgr/config"
- "vitess.io/vitess/go/vt/vtgr/external/golib/sqlutils"
"vitess.io/vitess/go/vt/vtgr/inst"
)
diff --git a/go/vt/vtgr/db/tls.go b/go/vt/vtgr/db/tls.go
index 67aeaa9b25f..62dcf028c0d 100644
--- a/go/vt/vtgr/db/tls.go
+++ b/go/vt/vtgr/db/tls.go
@@ -26,6 +26,7 @@ import (
"strings"
"time"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"github.com/go-sql-driver/mysql"
@@ -33,7 +34,6 @@ import (
"github.com/rcrowley/go-metrics"
"vitess.io/vitess/go/vt/vtgr/config"
- "vitess.io/vitess/go/vt/vtgr/external/golib/sqlutils"
"vitess.io/vitess/go/vt/vtgr/ssl"
)
diff --git a/go/vt/vtorc/attributes/attributes.go b/go/vt/vtorc/attributes/attributes.go
deleted file mode 100644
index 466f57c93d7..00000000000
--- a/go/vt/vtorc/attributes/attributes.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- Copyright 2014 Outbrain Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package attributes
-
-// HostAttributes presents attributes submitted by a host
-type HostAttributes struct {
- Hostname string
- AttributeName string
- AttributeValue string
- SubmitTimestamp string
- ExpireTimestamp string
-}
diff --git a/go/vt/vtorc/attributes/attributes_dao.go b/go/vt/vtorc/attributes/attributes_dao.go
deleted file mode 100644
index 503d4e4d22c..00000000000
--- a/go/vt/vtorc/attributes/attributes_dao.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- Copyright 2014 Outbrain Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package attributes
-
-import (
- "fmt"
-
- "vitess.io/vitess/go/vt/log"
-
- "github.com/openark/golib/sqlutils"
-
- "vitess.io/vitess/go/vt/vtorc/db"
-)
-
-// SetHostAttributes is used to set host attributes
-func SetHostAttributes(hostname string, attributeName string, attributeValue string) error {
- _, err := db.ExecVTOrc(`
- replace
- into host_attributes (
- hostname, attribute_name, attribute_value, submit_timestamp, expire_timestamp
- ) VALUES (
- ?, ?, ?, NOW(), NULL
- )
- `,
- hostname,
- attributeName,
- attributeValue,
- )
- if err != nil {
- log.Error(err)
- return err
- }
-
- return err
-}
-
-func getHostAttributesByClause(whereClause string, args []any) ([]HostAttributes, error) {
- var res []HostAttributes
- query := fmt.Sprintf(`
- select
- hostname,
- attribute_name,
- attribute_value,
- submit_timestamp ,
- ifnull(expire_timestamp, '') as expire_timestamp
- from
- host_attributes
- %s
- order by
- hostname, attribute_name
- `, whereClause)
-
- err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error {
- hostAttributes := HostAttributes{}
- hostAttributes.Hostname = m.GetString("hostname")
- hostAttributes.AttributeName = m.GetString("attribute_name")
- hostAttributes.AttributeValue = m.GetString("attribute_value")
- hostAttributes.SubmitTimestamp = m.GetString("submit_timestamp")
- hostAttributes.ExpireTimestamp = m.GetString("expire_timestamp")
-
- res = append(res, hostAttributes)
- return nil
- })
-
- if err != nil {
- log.Error(err)
- }
- return res, err
-}
-
-// GetHostAttribute expects to return a single attribute for a given hostname/attribute-name combination
-// or error on empty result
-func GetHostAttribute(hostname string, attributeName string) (string, error) {
- whereClause := `where hostname=? and attribute_name=?`
- attributes, err := getHostAttributesByClause(whereClause, sqlutils.Args(hostname, attributeName))
- if err != nil {
- return "", err
- }
- if len(attributeName) == 0 {
- log.Errorf("No attribute found for %+v, %+v", hostname, attributeName)
- return "", fmt.Errorf("No attribute found for %+v, %+v", hostname, attributeName)
- }
- return attributes[0].AttributeValue, nil
-}
-
-// SetGeneralAttribute sets an attribute not associated with a host. Its a key-value thing
-func SetGeneralAttribute(attributeName string, attributeValue string) error {
- if attributeName == "" {
- return nil
- }
- return SetHostAttributes("*", attributeName, attributeValue)
-}
-
-// GetGeneralAttribute expects to return a single attribute value (not associated with a specific hostname)
-func GetGeneralAttribute(attributeName string) (result string, err error) {
- return GetHostAttribute("*", attributeName)
-}
diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go
index 0c5cadd2431..3d3dde96034 100644
--- a/go/vt/vtorc/config/config.go
+++ b/go/vt/vtorc/config/config.go
@@ -64,7 +64,6 @@ var (
auditPurgeDuration = 7 * 24 * time.Hour // Equivalent of 7 days
recoveryPeriodBlockDuration = 30 * time.Second
preventCrossCellFailover = false
- lockShardTimeout = 30 * time.Second
waitReplicasTimeout = 30 * time.Second
topoInformationRefreshDuration = 15 * time.Second
recoveryPollDuration = 1 * time.Second
@@ -82,7 +81,8 @@ func RegisterFlags(fs *pflag.FlagSet) {
fs.DurationVar(&auditPurgeDuration, "audit-purge-duration", auditPurgeDuration, "Duration for which audit logs are held before being purged. Should be in multiples of days")
fs.DurationVar(&recoveryPeriodBlockDuration, "recovery-period-block-duration", recoveryPeriodBlockDuration, "Duration for which a new recovery is blocked on an instance after running a recovery")
fs.BoolVar(&preventCrossCellFailover, "prevent-cross-cell-failover", preventCrossCellFailover, "Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover")
- fs.DurationVar(&lockShardTimeout, "lock-shard-timeout", lockShardTimeout, "Duration for which a shard lock is held when running a recovery")
+ fs.Duration("lock-shard-timeout", 30*time.Second, "Duration for which a shard lock is held when running a recovery")
+ _ = fs.MarkDeprecated("lock-shard-timeout", "Please use lock-timeout instead.")
fs.DurationVar(&waitReplicasTimeout, "wait-replicas-timeout", waitReplicasTimeout, "Duration for which to wait for replica's to respond when issuing RPCs")
fs.DurationVar(&topoInformationRefreshDuration, "topo-information-refresh-duration", topoInformationRefreshDuration, "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server")
fs.DurationVar(&recoveryPollDuration, "recovery-poll-duration", recoveryPollDuration, "Timer duration on which VTOrc polls its database to run a recovery")
@@ -103,8 +103,7 @@ type Configuration struct {
AuditPurgeDays uint // Days after which audit entries are purged from the database
RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping
PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, vtorc will do all it can to only fail over within same DC, or else not fail over at all.
- LockShardTimeoutSeconds int // Timeout on context used to lock shard. Should be a small value because we should fail-fast
- WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockShardTimeoutSeconds since that is the total time we use for an ERS.
+ WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockTimeout since that is the total time we use for an ERS.
TopoInformationRefreshSeconds int // Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topo-server.
RecoveryPollSeconds int // Timer duration on which VTOrc recovery analysis runs
}
@@ -133,7 +132,6 @@ func UpdateConfigValuesFromFlags() {
Config.AuditPurgeDays = uint(auditPurgeDuration / (time.Hour * 24))
Config.RecoveryPeriodBlockSeconds = int(recoveryPeriodBlockDuration / time.Second)
Config.PreventCrossDataCenterPrimaryFailover = preventCrossCellFailover
- Config.LockShardTimeoutSeconds = int(lockShardTimeout / time.Second)
Config.WaitReplicasTimeoutSeconds = int(waitReplicasTimeout / time.Second)
Config.TopoInformationRefreshSeconds = int(topoInformationRefreshDuration / time.Second)
Config.RecoveryPollSeconds = int(recoveryPollDuration / time.Second)
@@ -157,7 +155,6 @@ func newConfiguration() *Configuration {
AuditPurgeDays: 7,
RecoveryPeriodBlockSeconds: 30,
PreventCrossDataCenterPrimaryFailover: false,
- LockShardTimeoutSeconds: 30,
WaitReplicasTimeoutSeconds: 30,
TopoInformationRefreshSeconds: 15,
RecoveryPollSeconds: 1,
@@ -165,23 +162,13 @@ func newConfiguration() *Configuration {
}
func (config *Configuration) postReadAdjustments() error {
- if config.IsSQLite() && config.SQLite3DataFile == "" {
+ if config.SQLite3DataFile == "" {
return fmt.Errorf("SQLite3DataFile must be set")
}
return nil
}
-// TODO: Simplify the callers and delete this function
-func (config *Configuration) IsSQLite() bool {
- return true
-}
-
-// TODO: Simplify the callers and delete this function
-func (config *Configuration) IsMySQL() bool {
- return false
-}
-
// read reads configuration from given file, or silently skips if the file does not exist.
// If the file does exist, then it is expected to be in valid JSON format or the function bails out.
func read(fileName string) (*Configuration, error) {
diff --git a/go/vt/vtorc/config/config_test.go b/go/vt/vtorc/config/config_test.go
index 90e78d56623..2009b476f1d 100644
--- a/go/vt/vtorc/config/config_test.go
+++ b/go/vt/vtorc/config/config_test.go
@@ -187,21 +187,6 @@ func TestUpdateConfigValuesFromFlags(t *testing.T) {
require.Equal(t, testConfig, Config)
})
- t.Run("override lockShardTimeout", func(t *testing.T) {
- oldLockShardTimeout := lockShardTimeout
- lockShardTimeout = 3 * time.Hour
- // Restore the changes we make
- defer func() {
- Config = newConfiguration()
- lockShardTimeout = oldLockShardTimeout
- }()
-
- testConfig := newConfiguration()
- testConfig.LockShardTimeoutSeconds = 10800
- UpdateConfigValuesFromFlags()
- require.Equal(t, testConfig, Config)
- })
-
t.Run("override waitReplicasTimeout", func(t *testing.T) {
oldWaitReplicasTimeout := waitReplicasTimeout
waitReplicasTimeout = 3*time.Minute + 4*time.Second
diff --git a/go/vt/vtorc/db/db.go b/go/vt/vtorc/db/db.go
index b71a80a1dc2..04150339c5c 100644
--- a/go/vt/vtorc/db/db.go
+++ b/go/vt/vtorc/db/db.go
@@ -20,8 +20,7 @@ import (
"database/sql"
"strings"
- "github.com/openark/golib/sqlutils"
-
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtorc/config"
)
@@ -54,10 +53,6 @@ func (dummyRes DummySQLResult) RowsAffected() (int64, error) {
return 1, nil
}
-func IsSQLite() bool {
- return config.Config.IsSQLite()
-}
-
// OpenTopology returns the DB instance for the vtorc backed database
func OpenVTOrc() (db *sql.DB, err error) {
var fromCache bool
@@ -73,11 +68,8 @@ func OpenVTOrc() (db *sql.DB, err error) {
return db, err
}
-func translateStatement(statement string) (string, error) {
- if IsSQLite() {
- statement = sqlutils.ToSqlite3Dialect(statement)
- }
- return statement, nil
+func translateStatement(statement string) string {
+ return sqlutils.ToSqlite3Dialect(statement)
}
// registerVTOrcDeployment updates the vtorc_metadata table upon successful deployment
@@ -102,30 +94,8 @@ func deployStatements(db *sql.DB, queries []string) error {
if err != nil {
log.Fatal(err.Error())
}
- // Ugly workaround ahead.
- // Origin of this workaround is the existence of some "timestamp NOT NULL," column definitions,
- // where in NO_ZERO_IN_DATE,NO_ZERO_DATE sql_mode are invalid (since default is implicitly "0")
- // This means installation of vtorc fails on such configured servers, and in particular on 5.7
- // where this setting is the dfault.
- // For purpose of backwards compatability, what we do is force sql_mode to be more relaxed, create the schemas
- // along with the "invalid" definition, and then go ahead and fix those definitions via following ALTER statements.
- // My bad.
- originalSQLMode := ""
- if config.Config.IsMySQL() {
- _ = tx.QueryRow(`select @@session.sql_mode`).Scan(&originalSQLMode)
- if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', '')`); err != nil {
- log.Fatal(err.Error())
- }
- if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_IN_DATE', '')`); err != nil {
- log.Fatal(err.Error())
- }
- }
for _, query := range queries {
- query, err := translateStatement(query)
- if err != nil {
- log.Fatalf("Cannot initiate vtorc: %+v; query=%+v", err, query)
- return err
- }
+ query = translateStatement(query)
if _, err := tx.Exec(query); err != nil {
if strings.Contains(err.Error(), "syntax error") {
log.Fatalf("Cannot initiate vtorc: %+v; query=%+v", err, query)
@@ -144,30 +114,31 @@ func deployStatements(db *sql.DB, queries []string) error {
}
}
}
- if config.Config.IsMySQL() {
- if _, err := tx.Exec(`set session sql_mode=?`, originalSQLMode); err != nil {
- log.Fatal(err.Error())
- }
- }
if err := tx.Commit(); err != nil {
log.Fatal(err.Error())
}
return nil
}
+// ClearVTOrcDatabase is used to clear the VTOrc database. This function is meant to be used by tests to clear the
+// database to get a clean slate without starting a new one.
+func ClearVTOrcDatabase() {
+ db, _, _ := sqlutils.GetSQLiteDB(config.Config.SQLite3DataFile)
+ if db != nil {
+ _ = initVTOrcDB(db)
+ }
+}
+
// initVTOrcDB attempts to create/upgrade the vtorc backend database. It is created once in the
// application's lifetime.
func initVTOrcDB(db *sql.DB) error {
log.Info("Initializing vtorc")
log.Info("Migrating database schema")
- _ = deployStatements(db, generateSQLBase)
- _ = deployStatements(db, generateSQLPatches)
+ _ = deployStatements(db, vtorcBackend)
_ = registerVTOrcDeployment(db)
- if IsSQLite() {
- _, _ = ExecVTOrc(`PRAGMA journal_mode = WAL`)
- _, _ = ExecVTOrc(`PRAGMA synchronous = NORMAL`)
- }
+ _, _ = ExecVTOrc(`PRAGMA journal_mode = WAL`)
+ _, _ = ExecVTOrc(`PRAGMA synchronous = NORMAL`)
return nil
}
@@ -175,10 +146,7 @@ func initVTOrcDB(db *sql.DB) error {
// execInternal
func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) {
var err error
- query, err = translateStatement(query)
- if err != nil {
- return nil, err
- }
+ query = translateStatement(query)
res, err := sqlutils.ExecNoPrepare(db, query, args...)
return res, err
}
@@ -186,10 +154,7 @@ func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) {
// ExecVTOrc will execute given query on the vtorc backend database.
func ExecVTOrc(query string, args ...any) (sql.Result, error) {
var err error
- query, err = translateStatement(query)
- if err != nil {
- return nil, err
- }
+ query = translateStatement(query)
db, err := OpenVTOrc()
if err != nil {
return nil, err
@@ -200,11 +165,7 @@ func ExecVTOrc(query string, args ...any) (sql.Result, error) {
// QueryVTOrcRowsMap
func QueryVTOrcRowsMap(query string, onRow func(sqlutils.RowMap) error) error {
- query, err := translateStatement(query)
- if err != nil {
- log.Fatalf("Cannot query vtorc: %+v; query=%+v", err, query)
- return err
- }
+ query = translateStatement(query)
db, err := OpenVTOrc()
if err != nil {
return err
@@ -215,11 +176,7 @@ func QueryVTOrcRowsMap(query string, onRow func(sqlutils.RowMap) error) error {
// QueryVTOrc
func QueryVTOrc(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error {
- query, err := translateStatement(query)
- if err != nil {
- log.Fatalf("Cannot query vtorc: %+v; query=%+v", err, query)
- return err
- }
+ query = translateStatement(query)
db, err := OpenVTOrc()
if err != nil {
return err
diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go
index 85aed1b2b86..88b26ed2c27 100644
--- a/go/vt/vtorc/db/generate_base.go
+++ b/go/vt/vtorc/db/generate_base.go
@@ -16,803 +16,815 @@
package db
-// generateSQLBase & generateSQLPatches are lists of SQL statements required to build the vtorc backend
-var generateSQLBase = []string{
+// vtorcBackend is a list of SQL statements required to build the vtorc backend
+var vtorcBackend = []string{
+ `
+DROP TABLE IF EXISTS database_instance
+`,
+ `
+CREATE TABLE database_instance (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ last_checked timestamp not null default (''),
+ last_seen timestamp NULL DEFAULT NULL,
+ server_id int NOT NULL,
+ version varchar(128) NOT NULL,
+ binlog_format varchar(16) NOT NULL,
+ log_bin tinyint NOT NULL,
+ log_replica_updates tinyint NOT NULL,
+ binary_log_file varchar(128) NOT NULL,
+ binary_log_pos bigint NOT NULL,
+ source_host varchar(128) NOT NULL,
+ source_port smallint NOT NULL,
+ replica_sql_running tinyint NOT NULL,
+ replica_io_running tinyint NOT NULL,
+ source_log_file varchar(128) NOT NULL,
+ read_source_log_pos bigint NOT NULL,
+ relay_source_log_file varchar(128) NOT NULL,
+ exec_source_log_pos bigint NOT NULL,
+ replication_lag_seconds bigint DEFAULT NULL,
+ replica_lag_seconds bigint DEFAULT NULL,
+ read_only TINYint not null default 0,
+ last_sql_error TEXT not null default '',
+ last_io_error TEXT not null default '',
+ oracle_gtid TINYint not null default 0,
+ mariadb_gtid TINYint not null default 0,
+ relay_log_file varchar(128) not null default '',
+ relay_log_pos bigint not null default 0,
+ pseudo_gtid TINYint not null default 0,
+ replication_depth TINYint not null default 0,
+ has_replication_filters TINYint not null default 0,
+ data_center varchar(32) not null default '',
+ physical_environment varchar(32) not null default '',
+ is_co_primary TINYint not null default 0,
+ sql_delay int not null default 0,
+ binlog_server TINYint not null default 0,
+ supports_oracle_gtid TINYint not null default 0,
+ executed_gtid_set text not null default '',
+ server_uuid varchar(64) not null default '',
+ last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00',
+ gtid_purged text not null default '',
+ has_replication_credentials TINYint not null default 0,
+ allow_tls TINYint not null default 0,
+ semi_sync_enforced TINYint not null default 0,
+ instance_alias varchar(128) not null default '',
+ version_comment varchar(128) NOT NULL DEFAULT '',
+ major_version varchar(16) not null default '',
+ binlog_row_image varchar(16) not null default '',
+ last_discovery_latency bigint not null default 0,
+ semi_sync_primary_enabled TINYint not null default 0,
+ semi_sync_replica_enabled TINYint not null default 0,
+ gtid_mode varchar(32) not null default '',
+ last_check_partial_success tinyint not null default 0,
+ source_uuid varchar(64) not null default '',
+ gtid_errant text not null default '',
+ ancestry_uuid text not null default '',
+ replication_sql_thread_state tinyint signed not null default 0,
+ replication_io_thread_state tinyint signed not null default 0,
+ region varchar(32) not null default '',
+ semi_sync_primary_timeout int NOT NULL DEFAULT 0,
+ semi_sync_primary_wait_for_replica_count int NOT NULL DEFAULT 0,
+ semi_sync_primary_status TINYint NOT NULL DEFAULT 0,
+ semi_sync_replica_status TINYint NOT NULL DEFAULT 0,
+ semi_sync_primary_clients int NOT NULL DEFAULT 0,
+ replication_group_name VARCHAR(64) NOT NULL DEFAULT '',
+ replication_group_is_single_primary_mode TINYint NOT NULL DEFAULT 1,
+ replication_group_member_state VARCHAR(16) NOT NULL DEFAULT '',
+ replication_group_member_role VARCHAR(16) NOT NULL DEFAULT '',
+ replication_group_members text not null default '',
+ replication_group_primary_host varchar(128) NOT NULL DEFAULT '',
+ replication_group_primary_port smallint NOT NULL DEFAULT 0,
+ PRIMARY KEY (hostname,port)
+)`,
+ `
+CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checked)
+ `,
+ `
+CREATE INDEX last_seen_idx_database_instance ON database_instance(last_seen)
+ `,
+ `
+DROP TABLE IF EXISTS database_instance_maintenance
+`,
+ `
+CREATE TABLE database_instance_maintenance (
+ database_instance_maintenance_id integer,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ maintenance_active tinyint(4) DEFAULT NULL,
+ begin_timestamp timestamp NULL DEFAULT NULL,
+ end_timestamp timestamp NULL DEFAULT NULL,
+ owner varchar(128) NOT NULL,
+ reason text NOT NULL,
+ processing_node_hostname varchar(128) not null default '',
+ processing_node_token varchar(128) not null default '',
+ explicitly_bounded TINYint not null default 0,
+ PRIMARY KEY (database_instance_maintenance_id)
+)`,
+ `
+CREATE UNIQUE INDEX maintenance_uidx_database_instance_maintenance ON database_instance_maintenance (maintenance_active, hostname, port)
+ `,
+ `
+DROP TABLE IF EXISTS database_instance_long_running_queries
+`,
+ `
+CREATE TABLE database_instance_long_running_queries (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ process_id bigint(20) NOT NULL,
+ process_started_at timestamp not null default (''),
+ process_user varchar(16) NOT NULL,
+ process_host varchar(128) NOT NULL,
+ process_db varchar(128) NOT NULL,
+ process_command varchar(16) NOT NULL,
+ process_time_seconds int(11) NOT NULL,
+ process_state varchar(128) NOT NULL,
+ process_info varchar(1024) NOT NULL,
+ PRIMARY KEY (hostname,port,process_id)
+)`,
+ `
+CREATE INDEX process_started_at_idx_database_instance_long_running_queries ON database_instance_long_running_queries (process_started_at)
+ `,
+ `
+DROP TABLE IF EXISTS audit
+`,
+ `
+CREATE TABLE audit (
+ audit_id integer,
+ audit_timestamp timestamp not null default (''),
+ audit_type varchar(128) NOT NULL,
+ hostname varchar(128) NOT NULL DEFAULT '',
+ port smallint NOT NULL,
+ message text NOT NULL,
+ keyspace varchar(128) NOT NULL,
+ shard varchar(128) NOT NULL,
+ PRIMARY KEY (audit_id)
+)`,
`
- CREATE TABLE IF NOT EXISTS database_instance (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- last_checked timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_seen timestamp NULL DEFAULT NULL,
- server_id int(10) unsigned NOT NULL,
- version varchar(128) CHARACTER SET ascii NOT NULL,
- binlog_format varchar(16) CHARACTER SET ascii NOT NULL,
- log_bin tinyint(3) unsigned NOT NULL,
- log_replica_updates tinyint(3) unsigned NOT NULL,
- binary_log_file varchar(128) CHARACTER SET ascii NOT NULL,
- binary_log_pos bigint(20) unsigned NOT NULL,
- source_host varchar(128) CHARACTER SET ascii NOT NULL,
- source_port smallint(5) unsigned NOT NULL,
- replica_sql_running tinyint(3) unsigned NOT NULL,
- replica_io_running tinyint(3) unsigned NOT NULL,
- source_log_file varchar(128) CHARACTER SET ascii NOT NULL,
- read_source_log_pos bigint(20) unsigned NOT NULL,
- relay_source_log_file varchar(128) CHARACTER SET ascii NOT NULL,
- exec_source_log_pos bigint(20) unsigned NOT NULL,
- replication_lag_seconds bigint(20) unsigned DEFAULT NULL,
- replica_lag_seconds bigint(20) unsigned DEFAULT NULL,
- cluster_name varchar(128) CHARACTER SET ascii NOT NULL,
- PRIMARY KEY (hostname,port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp)
`,
`
- DROP INDEX cluster_name_idx ON database_instance
+CREATE INDEX host_port_idx_audit ON audit (hostname, port, audit_timestamp)
`,
`
- CREATE INDEX cluster_name_idx_database_instance ON database_instance(cluster_name)
- `,
- `
- DROP INDEX last_checked_idx ON database_instance
- `,
- `
- CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checked)
- `,
- `
- DROP INDEX last_seen_idx ON database_instance
- `,
- `
- CREATE INDEX last_seen_idx_database_instance ON database_instance(last_seen)
- `,
- `
- CREATE TABLE IF NOT EXISTS database_instance_maintenance (
- database_instance_maintenance_id int(10) unsigned NOT NULL AUTO_INCREMENT,
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- maintenance_active tinyint(4) DEFAULT NULL,
- begin_timestamp timestamp NULL DEFAULT NULL,
- end_timestamp timestamp NULL DEFAULT NULL,
- owner varchar(128) CHARACTER SET utf8 NOT NULL,
- reason text CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (database_instance_maintenance_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- DROP INDEX maintenance_uidx ON database_instance_maintenance
- `,
- `
- CREATE UNIQUE INDEX maintenance_uidx_database_instance_maintenance ON database_instance_maintenance (maintenance_active, hostname, port)
- `,
- `
- CREATE TABLE IF NOT EXISTS database_instance_long_running_queries (
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- process_id bigint(20) NOT NULL,
- process_started_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- process_user varchar(16) CHARACTER SET utf8 NOT NULL,
- process_host varchar(128) CHARACTER SET utf8 NOT NULL,
- process_db varchar(128) CHARACTER SET utf8 NOT NULL,
- process_command varchar(16) CHARACTER SET utf8 NOT NULL,
- process_time_seconds int(11) NOT NULL,
- process_state varchar(128) CHARACTER SET utf8 NOT NULL,
- process_info varchar(1024) CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (hostname,port,process_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- DROP INDEX process_started_at_idx ON database_instance_long_running_queries
- `,
- `
- CREATE INDEX process_started_at_idx_database_instance_long_running_queries ON database_instance_long_running_queries (process_started_at)
- `,
- `
- CREATE TABLE IF NOT EXISTS audit (
- audit_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
- audit_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- audit_type varchar(128) CHARACTER SET ascii NOT NULL,
- hostname varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '',
- port smallint(5) unsigned NOT NULL,
- message text CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (audit_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=latin1
- `,
- `
- DROP INDEX audit_timestamp_idx ON audit
- `,
- `
- CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp)
- `,
- `
- DROP INDEX host_port_idx ON audit
- `,
- `
- CREATE INDEX host_port_idx_audit ON audit (hostname, port, audit_timestamp)
- `,
- `
- CREATE TABLE IF NOT EXISTS host_agent (
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- token varchar(128) NOT NULL,
- last_submitted timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_checked timestamp NULL DEFAULT NULL,
- last_seen timestamp NULL DEFAULT NULL,
- mysql_port smallint(5) unsigned DEFAULT NULL,
- count_mysql_snapshots smallint(5) unsigned NOT NULL,
- PRIMARY KEY (hostname)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- DROP INDEX token_idx ON host_agent
- `,
- `
- CREATE INDEX token_idx_host_agent ON host_agent (token)
- `,
- `
- DROP INDEX last_submitted_idx ON host_agent
- `,
- `
- CREATE INDEX last_submitted_idx_host_agent ON host_agent (last_submitted)
- `,
- `
- DROP INDEX last_checked_idx ON host_agent
- `,
- `
- CREATE INDEX last_checked_idx_host_agent ON host_agent (last_checked)
- `,
- `
- DROP INDEX last_seen_idx ON host_agent
- `,
- `
- CREATE INDEX last_seen_idx_host_agent ON host_agent (last_seen)
- `,
- `
- CREATE TABLE IF NOT EXISTS agent_seed (
- agent_seed_id int(10) unsigned NOT NULL AUTO_INCREMENT,
- target_hostname varchar(128) NOT NULL,
- source_hostname varchar(128) NOT NULL,
- start_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
- is_complete tinyint(3) unsigned NOT NULL DEFAULT '0',
- is_successful tinyint(3) unsigned NOT NULL DEFAULT '0',
- PRIMARY KEY (agent_seed_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- DROP INDEX target_hostname_idx ON agent_seed
- `,
- `
- CREATE INDEX target_hostname_idx_agent_seed ON agent_seed (target_hostname,is_complete)
- `,
- `
- DROP INDEX source_hostname_idx ON agent_seed
- `,
- `
- CREATE INDEX source_hostname_idx_agent_seed ON agent_seed (source_hostname,is_complete)
- `,
- `
- DROP INDEX start_timestamp_idx ON agent_seed
- `,
- `
- CREATE INDEX start_timestamp_idx_agent_seed ON agent_seed (start_timestamp)
- `,
- `
- DROP INDEX is_complete_idx ON agent_seed
- `,
- `
- CREATE INDEX is_complete_idx_agent_seed ON agent_seed (is_complete,start_timestamp)
- `,
- `
- DROP INDEX is_successful_idx ON agent_seed
- `,
- `
- CREATE INDEX is_successful_idx_agent_seed ON agent_seed (is_successful, start_timestamp)
- `,
- `
- CREATE TABLE IF NOT EXISTS agent_seed_state (
- agent_seed_state_id int(10) unsigned NOT NULL AUTO_INCREMENT,
- agent_seed_id int(10) unsigned NOT NULL,
- state_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- state_action varchar(127) NOT NULL,
- error_message varchar(255) NOT NULL,
- PRIMARY KEY (agent_seed_state_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- DROP INDEX agent_seed_idx ON agent_seed_state
- `,
+DROP TABLE IF EXISTS host_agent
+`,
`
- CREATE INDEX agent_seed_idx_agent_seed_state ON agent_seed_state (agent_seed_id, state_timestamp)
- `,
+CREATE TABLE host_agent (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ token varchar(128) NOT NULL,
+ last_submitted timestamp not null default (''),
+ last_checked timestamp NULL DEFAULT NULL,
+ last_seen timestamp NULL DEFAULT NULL,
+ mysql_port smallint DEFAULT NULL,
+ count_mysql_snapshots smallint NOT NULL,
+ PRIMARY KEY (hostname)
+)`,
`
- CREATE TABLE IF NOT EXISTS host_attributes (
- hostname varchar(128) NOT NULL,
- attribute_name varchar(128) NOT NULL,
- attribute_value varchar(128) NOT NULL,
- submit_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- expire_timestamp timestamp NULL DEFAULT NULL,
- PRIMARY KEY (hostname,attribute_name)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX token_idx_host_agent ON host_agent (token)
`,
`
- DROP INDEX attribute_name_idx ON host_attributes
+CREATE INDEX last_submitted_idx_host_agent ON host_agent (last_submitted)
`,
`
- CREATE INDEX attribute_name_idx_host_attributes ON host_attributes (attribute_name)
+CREATE INDEX last_checked_idx_host_agent ON host_agent (last_checked)
`,
`
- DROP INDEX attribute_value_idx ON host_attributes
+CREATE INDEX last_seen_idx_host_agent ON host_agent (last_seen)
`,
`
- CREATE INDEX attribute_value_idx_host_attributes ON host_attributes (attribute_value)
- `,
+DROP TABLE IF EXISTS agent_seed
+`,
`
- DROP INDEX submit_timestamp_idx ON host_attributes
- `,
+CREATE TABLE agent_seed (
+ agent_seed_id integer,
+ target_hostname varchar(128) NOT NULL,
+ source_hostname varchar(128) NOT NULL,
+ start_timestamp timestamp not null default (''),
+ end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ is_complete tinyint NOT NULL DEFAULT '0',
+ is_successful tinyint NOT NULL DEFAULT '0',
+ PRIMARY KEY (agent_seed_id)
+)`,
+ `
+CREATE INDEX target_hostname_idx_agent_seed ON agent_seed (target_hostname,is_complete)
+ `,
+ `
+CREATE INDEX source_hostname_idx_agent_seed ON agent_seed (source_hostname,is_complete)
+ `,
`
- CREATE INDEX submit_timestamp_idx_host_attributes ON host_attributes (submit_timestamp)
+CREATE INDEX start_timestamp_idx_agent_seed ON agent_seed (start_timestamp)
`,
`
- DROP INDEX expire_timestamp_idx ON host_attributes
+CREATE INDEX is_complete_idx_agent_seed ON agent_seed (is_complete,start_timestamp)
`,
`
- CREATE INDEX expire_timestamp_idx_host_attributes ON host_attributes (expire_timestamp)
+CREATE INDEX is_successful_idx_agent_seed ON agent_seed (is_successful, start_timestamp)
`,
`
- CREATE TABLE IF NOT EXISTS hostname_resolve (
- hostname varchar(128) NOT NULL,
- resolved_hostname varchar(128) NOT NULL,
- resolved_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (hostname)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+DROP TABLE IF EXISTS agent_seed_state
+`,
`
- DROP INDEX resolved_timestamp_idx ON hostname_resolve
- `,
+CREATE TABLE agent_seed_state (
+ agent_seed_state_id integer,
+ agent_seed_id int NOT NULL,
+ state_timestamp timestamp not null default (''),
+ state_action varchar(127) NOT NULL,
+ error_message varchar(255) NOT NULL,
+ PRIMARY KEY (agent_seed_state_id)
+)`,
`
- CREATE INDEX resolved_timestamp_idx_hostname_resolve ON hostname_resolve (resolved_timestamp)
+CREATE INDEX agent_seed_idx_agent_seed_state ON agent_seed_state (agent_seed_id, state_timestamp)
`,
`
- CREATE TABLE IF NOT EXISTS active_node (
- anchor tinyint unsigned NOT NULL,
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- token varchar(128) NOT NULL,
- last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (anchor)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+DROP TABLE IF EXISTS hostname_resolve
+`,
`
- INSERT IGNORE INTO active_node (anchor, hostname, token, last_seen_active)
- VALUES (1, '', '', NOW())
- `,
+CREATE TABLE hostname_resolve (
+ hostname varchar(128) NOT NULL,
+ resolved_hostname varchar(128) NOT NULL,
+ resolved_timestamp timestamp not null default (''),
+ PRIMARY KEY (hostname)
+)`,
`
- CREATE TABLE IF NOT EXISTS node_health (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- token varchar(128) NOT NULL,
- last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (hostname, token)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX resolved_timestamp_idx_hostname_resolve ON hostname_resolve (resolved_timestamp)
`,
`
- DROP VIEW IF EXISTS _whats_wrong
- `,
+DROP TABLE IF EXISTS active_node
+`,
`
- DROP VIEW IF EXISTS whats_wrong
- `,
+CREATE TABLE active_node (
+ anchor tinyint NOT NULL,
+ hostname varchar(128) NOT NULL,
+ token varchar(128) NOT NULL,
+ last_seen_active timestamp not null default (''),
+ first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ PRIMARY KEY (anchor)
+)`,
`
- DROP VIEW IF EXISTS whats_wrong_summary
- `,
+DROP TABLE IF EXISTS node_health
+`,
`
- CREATE TABLE IF NOT EXISTS topology_recovery (
- recovery_id bigint unsigned not null auto_increment,
- hostname varchar(128) NOT NULL,
- port smallint unsigned NOT NULL,
- in_active_period tinyint unsigned NOT NULL DEFAULT 0,
- start_active_period timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- end_active_period_unixtime int unsigned,
- end_recovery timestamp NULL DEFAULT NULL,
- processing_node_hostname varchar(128) CHARACTER SET ascii NOT NULL,
- processcing_node_token varchar(128) NOT NULL,
- successor_hostname varchar(128) DEFAULT NULL,
- successor_port smallint unsigned DEFAULT NULL,
- PRIMARY KEY (recovery_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+CREATE TABLE node_health (
+ hostname varchar(128) NOT NULL,
+ token varchar(128) NOT NULL,
+ last_seen_active timestamp not null default (''),
+ extra_info varchar(128) not null default '',
+ command varchar(128) not null default '',
+ app_version varchar(64) NOT NULL DEFAULT "",
+ first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ db_backend varchar(255) NOT NULL DEFAULT "",
+ incrementing_indicator bigint not null default 0,
+ PRIMARY KEY (hostname, token)
+)`,
`
- DROP INDEX in_active_start_period_idx ON topology_recovery
- `,
+DROP TABLE IF EXISTS topology_recovery
+`,
`
- CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery (in_active_period, start_active_period)
- `,
+CREATE TABLE topology_recovery (
+ recovery_id integer,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ in_active_period tinyint NOT NULL DEFAULT 0,
+ start_active_period timestamp not null default (''),
+ end_active_period_unixtime int,
+ end_recovery timestamp NULL DEFAULT NULL,
+ processing_node_hostname varchar(128) NOT NULL,
+ processcing_node_token varchar(128) NOT NULL,
+ successor_hostname varchar(128) DEFAULT NULL,
+ successor_port smallint DEFAULT NULL,
+ analysis varchar(128) not null default '',
+ keyspace varchar(128) NOT NULL,
+ shard varchar(128) NOT NULL,
+ count_affected_replicas int not null default 0,
+ is_successful TINYint NOT NULL DEFAULT 0,
+ acknowledged TINYint NOT NULL DEFAULT 0,
+ acknowledged_by varchar(128) not null default '',
+ acknowledge_comment text not null default '',
+ participating_instances text not null default '',
+ lost_replicas text not null default '',
+ all_errors text not null default '',
+ acknowledged_at TIMESTAMP NULL,
+ last_detection_id bigint not null default 0,
+ successor_alias varchar(128) DEFAULT NULL,
+ uid varchar(128) not null default '',
+ PRIMARY KEY (recovery_id)
+)`,
+ `
+CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery (in_active_period, start_active_period)
+ `,
+ `
+CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period)
+ `,
+ `
+CREATE UNIQUE INDEX hostname_port_active_period_uidx_topology_recovery ON topology_recovery (hostname, port, in_active_period, end_active_period_unixtime)
+ `,
+ `
+DROP TABLE IF EXISTS hostname_unresolve
+`,
+ `
+CREATE TABLE hostname_unresolve (
+ hostname varchar(128) NOT NULL,
+ unresolved_hostname varchar(128) NOT NULL,
+ last_registered timestamp not null default (''),
+ PRIMARY KEY (hostname)
+)`,
+ `
+CREATE INDEX unresolved_hostname_idx_hostname_unresolve ON hostname_unresolve (unresolved_hostname)
+ `,
+ `
+DROP TABLE IF EXISTS database_instance_topology_history
+`,
+ `
+CREATE TABLE database_instance_topology_history (
+ snapshot_unix_timestamp int NOT NULL,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ source_host varchar(128) NOT NULL,
+ source_port smallint NOT NULL,
+ keyspace varchar(128) NOT NULL,
+ shard varchar(128) NOT NULL,
+ version varchar(128) not null default '',
+ PRIMARY KEY (snapshot_unix_timestamp, hostname, port)
+)`,
+ `
+CREATE INDEX keyspace_shard_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, keyspace, shard)
+ `,
+ `
+DROP TABLE IF EXISTS candidate_database_instance
+`,
+ `
+CREATE TABLE candidate_database_instance (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ last_suggested timestamp not null default (''),
+ priority TINYINT SIGNED NOT NULL DEFAULT 1,
+ promotion_rule text check(promotion_rule in ('must', 'prefer', 'neutral', 'prefer_not', 'must_not')) NOT NULL DEFAULT 'neutral',
+ PRIMARY KEY (hostname, port)
+)`,
+ `
+CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested)
+ `,
+ `
+DROP TABLE IF EXISTS database_instance_downtime
+`,
+ `
+CREATE TABLE database_instance_downtime (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ downtime_active tinyint(4) DEFAULT NULL,
+ begin_timestamp timestamp default (''),
+ end_timestamp timestamp NULL DEFAULT NULL,
+ owner varchar(128) NOT NULL,
+ reason text NOT NULL,
+ PRIMARY KEY (hostname, port)
+)`,
+ `
+DROP TABLE IF EXISTS topology_failure_detection
+`,
+ `
+CREATE TABLE topology_failure_detection (
+ detection_id integer,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ in_active_period tinyint NOT NULL DEFAULT '0',
+ start_active_period timestamp not null default (''),
+ end_active_period_unixtime int NOT NULL,
+ processing_node_hostname varchar(128) NOT NULL,
+ processcing_node_token varchar(128) NOT NULL,
+ analysis varchar(128) NOT NULL,
+ keyspace varchar(128) NOT NULL,
+ shard varchar(128) NOT NULL,
+ count_affected_replicas int NOT NULL,
+ is_actionable tinyint not null default 0,
+ PRIMARY KEY (detection_id)
+)`,
`
- DROP INDEX start_active_period_idx ON topology_recovery
+CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period)
`,
`
- CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period)
- `,
+DROP TABLE IF EXISTS hostname_resolve_history
+`,
`
- DROP INDEX hostname_port_active_period_uidx ON topology_recovery
- `,
+CREATE TABLE hostname_resolve_history (
+ resolved_hostname varchar(128) NOT NULL,
+ hostname varchar(128) NOT NULL,
+ resolved_timestamp timestamp not null default (''),
+ PRIMARY KEY (resolved_hostname)
+)`,
`
- CREATE UNIQUE INDEX hostname_port_active_period_uidx_topology_recovery ON topology_recovery (hostname, port, in_active_period, end_active_period_unixtime)
+CREATE INDEX hostname_idx_hostname_resolve_history ON hostname_resolve_history (hostname)
`,
`
- CREATE TABLE IF NOT EXISTS hostname_unresolve (
- hostname varchar(128) NOT NULL,
- unresolved_hostname varchar(128) NOT NULL,
- PRIMARY KEY (hostname)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX resolved_timestamp_idx_hostname_resolve_history ON hostname_resolve_history (resolved_timestamp)
`,
`
- DROP INDEX unresolved_hostname_idx ON hostname_unresolve
- `,
+DROP TABLE IF EXISTS hostname_unresolve_history
+`,
`
- CREATE INDEX unresolved_hostname_idx_hostname_unresolve ON hostname_unresolve (unresolved_hostname)
- `,
+CREATE TABLE hostname_unresolve_history (
+ unresolved_hostname varchar(128) NOT NULL,
+ hostname varchar(128) NOT NULL,
+ last_registered timestamp not null default (''),
+ PRIMARY KEY (unresolved_hostname)
+)`,
`
- CREATE TABLE IF NOT EXISTS database_instance_topology_history (
- snapshot_unix_timestamp INT UNSIGNED NOT NULL,
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- source_host varchar(128) CHARACTER SET ascii NOT NULL,
- source_port smallint(5) unsigned NOT NULL,
- cluster_name tinytext CHARACTER SET ascii NOT NULL,
- PRIMARY KEY (snapshot_unix_timestamp, hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX hostname_idx_hostname_unresolve_history ON hostname_unresolve_history (hostname)
`,
`
- DROP INDEX cluster_name_idx ON database_instance_topology_history
+CREATE INDEX last_registered_idx_hostname_unresolve_history ON hostname_unresolve_history (last_registered)
`,
`
- CREATE INDEX cluster_name_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, cluster_name(128))
- `,
+DROP TABLE IF EXISTS primary_position_equivalence
+`,
`
- CREATE TABLE IF NOT EXISTS candidate_database_instance (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+CREATE TABLE primary_position_equivalence (
+ equivalence_id integer,
+ primary1_hostname varchar(128) NOT NULL,
+ primary1_port smallint NOT NULL,
+ primary1_binary_log_file varchar(128) NOT NULL,
+ primary1_binary_log_pos bigint NOT NULL,
+ primary2_hostname varchar(128) NOT NULL,
+ primary2_port smallint NOT NULL,
+ primary2_binary_log_file varchar(128) NOT NULL,
+ primary2_binary_log_pos bigint NOT NULL,
+ last_suggested timestamp not null default (''),
+ PRIMARY KEY (equivalence_id)
+)`,
`
- DROP INDEX last_suggested_idx ON candidate_database_instance
+CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port)
`,
`
- CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested)
+CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos)
`,
`
- CREATE TABLE IF NOT EXISTS database_instance_downtime (
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- downtime_active tinyint(4) DEFAULT NULL,
- begin_timestamp timestamp DEFAULT CURRENT_TIMESTAMP,
- end_timestamp timestamp NULL DEFAULT NULL,
- owner varchar(128) CHARACTER SET utf8 NOT NULL,
- reason text CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested)
`,
`
- CREATE TABLE IF NOT EXISTS topology_failure_detection (
- detection_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
- hostname varchar(128) NOT NULL,
- port smallint unsigned NOT NULL,
- in_active_period tinyint unsigned NOT NULL DEFAULT '0',
- start_active_period timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- end_active_period_unixtime int unsigned NOT NULL,
- processing_node_hostname varchar(128) NOT NULL,
- processcing_node_token varchar(128) NOT NULL,
- analysis varchar(128) NOT NULL,
- cluster_name varchar(128) NOT NULL,
- count_affected_replicas int unsigned NOT NULL,
- PRIMARY KEY (detection_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+DROP TABLE IF EXISTS async_request
+`,
`
- DROP INDEX hostname_port_active_period_uidx ON topology_failure_detection
- `,
+CREATE TABLE async_request (
+ request_id integer,
+ command varchar(128) not null,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ destination_hostname varchar(128) NOT NULL,
+ destination_port smallint NOT NULL,
+ pattern text NOT NULL,
+ gtid_hint varchar(32) not null,
+ begin_timestamp timestamp NULL DEFAULT NULL,
+ end_timestamp timestamp NULL DEFAULT NULL,
+ story text NOT NULL,
+ PRIMARY KEY (request_id)
+)`,
`
- DROP INDEX in_active_start_period_idx ON topology_failure_detection
+CREATE INDEX begin_timestamp_idx_async_request ON async_request (begin_timestamp)
`,
`
- CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period)
+CREATE INDEX end_timestamp_idx_async_request ON async_request (end_timestamp)
`,
`
- CREATE TABLE IF NOT EXISTS hostname_resolve_history (
- resolved_hostname varchar(128) NOT NULL,
- hostname varchar(128) NOT NULL,
- resolved_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (resolved_hostname)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+DROP TABLE IF EXISTS blocked_topology_recovery
+`,
`
- DROP INDEX hostname ON hostname_resolve_history
- `,
+CREATE TABLE blocked_topology_recovery (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ keyspace varchar(128) NOT NULL,
+ shard varchar(128) NOT NULL,
+ analysis varchar(128) NOT NULL,
+ last_blocked_timestamp timestamp not null default (''),
+ blocking_recovery_id bigint,
+ PRIMARY KEY (hostname, port)
+)`,
`
- CREATE INDEX hostname_idx_hostname_resolve_history ON hostname_resolve_history (hostname)
+CREATE INDEX keyspace_shard_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (keyspace, shard, last_blocked_timestamp)
`,
`
- DROP INDEX resolved_timestamp_idx ON hostname_resolve_history
- `,
+DROP TABLE IF EXISTS database_instance_last_analysis
+`,
`
- CREATE INDEX resolved_timestamp_idx_hostname_resolve_history ON hostname_resolve_history (resolved_timestamp)
- `,
+CREATE TABLE database_instance_last_analysis (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ analysis_timestamp timestamp not null default (''),
+ analysis varchar(128) NOT NULL,
+ PRIMARY KEY (hostname, port)
+)`,
`
- CREATE TABLE IF NOT EXISTS hostname_unresolve_history (
- unresolved_hostname varchar(128) NOT NULL,
- hostname varchar(128) NOT NULL,
- last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (unresolved_hostname)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX analysis_timestamp_idx_database_instance_last_analysis ON database_instance_last_analysis (analysis_timestamp)
`,
`
- DROP INDEX hostname ON hostname_unresolve_history
- `,
+DROP TABLE IF EXISTS database_instance_analysis_changelog
+`,
`
- CREATE INDEX hostname_idx_hostname_unresolve_history ON hostname_unresolve_history (hostname)
- `,
+CREATE TABLE database_instance_analysis_changelog (
+ changelog_id integer,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ analysis_timestamp timestamp not null default (''),
+ analysis varchar(128) NOT NULL,
+ PRIMARY KEY (changelog_id)
+)`,
`
- DROP INDEX last_registered_idx ON hostname_unresolve_history
+CREATE INDEX analysis_timestamp_idx_database_instance_analysis_changelog ON database_instance_analysis_changelog (analysis_timestamp)
`,
`
- CREATE INDEX last_registered_idx_hostname_unresolve_history ON hostname_unresolve_history (last_registered)
- `,
+DROP TABLE IF EXISTS node_health_history
+`,
`
- CREATE TABLE IF NOT EXISTS cluster_domain_name (
- cluster_name varchar(128) CHARACTER SET ascii NOT NULL,
- domain_name varchar(128) NOT NULL,
- PRIMARY KEY (cluster_name)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+CREATE TABLE node_health_history (
+ history_id integer,
+ hostname varchar(128) NOT NULL,
+ token varchar(128) NOT NULL,
+ first_seen_active timestamp NOT NULL,
+ extra_info varchar(128) NOT NULL,
+ command varchar(128) not null default '',
+ app_version varchar(64) NOT NULL DEFAULT "",
+ PRIMARY KEY (history_id)
+)`,
`
- DROP INDEX domain_name_idx ON cluster_domain_name
+CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (first_seen_active)
`,
`
- CREATE INDEX domain_name_idx_cluster_domain_name ON cluster_domain_name (domain_name(32))
+CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token)
`,
`
- CREATE TABLE IF NOT EXISTS primary_position_equivalence (
- equivalence_id bigint unsigned not null auto_increment,
- primary1_hostname varchar(128) CHARACTER SET ascii NOT NULL,
- primary1_port smallint(5) unsigned NOT NULL,
- primary1_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL,
- primary1_binary_log_pos bigint(20) unsigned NOT NULL,
- primary2_hostname varchar(128) CHARACTER SET ascii NOT NULL,
- primary2_port smallint(5) unsigned NOT NULL,
- primary2_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL,
- primary2_binary_log_pos bigint(20) unsigned NOT NULL,
- last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (equivalence_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+DROP TABLE IF EXISTS database_instance_coordinates_history
+`,
`
- DROP INDEX equivalence_uidx ON primary_position_equivalence
- `,
+CREATE TABLE database_instance_coordinates_history (
+ history_id integer,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ recorded_timestamp timestamp not null default (''),
+ binary_log_file varchar(128) NOT NULL,
+ binary_log_pos bigint NOT NULL,
+ relay_log_file varchar(128) NOT NULL,
+ relay_log_pos bigint NOT NULL,
+ last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ PRIMARY KEY (history_id)
+)`,
`
- CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port)
+CREATE INDEX hostname_port_recorded_idx_database_instance_coordinates_history ON database_instance_coordinates_history (hostname, port, recorded_timestamp)
`,
`
- DROP INDEX primary2_idx ON primary_position_equivalence
+CREATE INDEX recorded_timestmp_idx_database_instance_coordinates_history ON database_instance_coordinates_history (recorded_timestamp)
`,
`
- CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos)
- `,
+DROP TABLE IF EXISTS database_instance_binlog_files_history
+`,
`
- DROP INDEX last_suggested_idx ON primary_position_equivalence
- `,
+CREATE TABLE database_instance_binlog_files_history (
+ history_id integer,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ binary_log_file varchar(128) NOT NULL,
+ binary_log_pos bigint NOT NULL,
+ first_seen timestamp not null default (''),
+ last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ PRIMARY KEY (history_id)
+)`,
`
- CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested)
+CREATE UNIQUE INDEX hostname_port_file_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (hostname, port, binary_log_file)
`,
`
- CREATE TABLE IF NOT EXISTS async_request (
- request_id bigint unsigned NOT NULL AUTO_INCREMENT,
- command varchar(128) charset ascii not null,
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- destination_hostname varchar(128) NOT NULL,
- destination_port smallint(5) unsigned NOT NULL,
- pattern text CHARACTER SET utf8 NOT NULL,
- gtid_hint varchar(32) charset ascii not null,
- begin_timestamp timestamp NULL DEFAULT NULL,
- end_timestamp timestamp NULL DEFAULT NULL,
- story text CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (request_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX last_seen_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (last_seen)
`,
`
- DROP INDEX begin_timestamp_idx ON async_request
- `,
+DROP TABLE IF EXISTS database_instance_recent_relaylog_history
+`,
`
- CREATE INDEX begin_timestamp_idx_async_request ON async_request (begin_timestamp)
- `,
+CREATE TABLE database_instance_recent_relaylog_history (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ current_relay_log_file varchar(128) NOT NULL,
+ current_relay_log_pos bigint NOT NULL,
+ current_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ prev_relay_log_file varchar(128) NOT NULL,
+ prev_relay_log_pos bigint NOT NULL,
+ prev_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
+ PRIMARY KEY (hostname, port)
+)`,
`
- DROP INDEX end_timestamp_idx ON async_request
+CREATE INDEX current_seen_idx_database_instance_recent_relaylog_history ON database_instance_recent_relaylog_history (current_seen)
`,
`
- CREATE INDEX end_timestamp_idx_async_request ON async_request (end_timestamp)
- `,
+DROP TABLE IF EXISTS vtorc_metadata
+`,
`
- CREATE TABLE IF NOT EXISTS blocked_topology_recovery (
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- cluster_name varchar(128) NOT NULL,
- analysis varchar(128) NOT NULL,
- last_blocked_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- blocking_recovery_id bigint unsigned,
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+CREATE TABLE vtorc_metadata (
+ anchor tinyint NOT NULL,
+ last_deployed_version varchar(128) NOT NULL,
+ last_deployed_timestamp timestamp NOT NULL,
+ PRIMARY KEY (anchor)
+)`,
+ `
+DROP TABLE IF EXISTS vtorc_db_deployments
+`,
`
- DROP INDEX cluster_blocked_idx ON blocked_topology_recovery
- `,
+CREATE TABLE vtorc_db_deployments (
+ deployed_version varchar(128) NOT NULL,
+ deployed_timestamp timestamp NOT NULL,
+ PRIMARY KEY (deployed_version)
+)`,
+ `
+DROP TABLE IF EXISTS global_recovery_disable
+`,
+ `
+CREATE TABLE global_recovery_disable (
+ disable_recovery tinyint NOT NULL ,
+ PRIMARY KEY (disable_recovery)
+)`,
`
- CREATE INDEX cluster_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (cluster_name, last_blocked_timestamp)
- `,
+DROP TABLE IF EXISTS topology_recovery_steps
+`,
`
- CREATE TABLE IF NOT EXISTS database_instance_last_analysis (
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- analysis varchar(128) NOT NULL,
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE TABLE topology_recovery_steps (
+ recovery_step_id integer,
+ recovery_uid varchar(128) NOT NULL,
+ audit_at timestamp not null default (''),
+ message text NOT NULL,
+ PRIMARY KEY (recovery_step_id)
+)`,
+ `
+DROP TABLE IF EXISTS raft_store
+`,
+ `
+CREATE TABLE raft_store (
+ store_id integer,
+ store_key varbinary(512) not null,
+ store_value blob not null,
+ PRIMARY KEY (store_id)
+)`,
+ `
+CREATE INDEX store_key_idx_raft_store ON raft_store (store_key)
+ `,
+ `
+DROP TABLE IF EXISTS raft_log
+`,
+ `
+CREATE TABLE raft_log (
+ log_index integer,
+ term bigint not null,
+ log_type int not null,
+ data blob not null,
+ PRIMARY KEY (log_index)
+)`,
+ `
+DROP TABLE IF EXISTS raft_snapshot
+`,
+ `
+CREATE TABLE raft_snapshot (
+ snapshot_id integer,
+ snapshot_name varchar(128) NOT NULL,
+ snapshot_meta varchar(4096) NOT NULL,
+ created_at timestamp not null default (''),
+ PRIMARY KEY (snapshot_id)
+)`,
+ `
+CREATE UNIQUE INDEX snapshot_name_uidx_raft_snapshot ON raft_snapshot (snapshot_name)
`,
`
- DROP INDEX analysis_timestamp_idx ON database_instance_last_analysis
- `,
+DROP TABLE IF EXISTS database_instance_peer_analysis
+`,
`
- CREATE INDEX analysis_timestamp_idx_database_instance_last_analysis ON database_instance_last_analysis (analysis_timestamp)
- `,
+CREATE TABLE database_instance_peer_analysis (
+ peer varchar(128) NOT NULL,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ analysis_timestamp timestamp not null default (''),
+ analysis varchar(128) NOT NULL,
+ PRIMARY KEY (peer, hostname, port)
+)`,
`
- CREATE TABLE IF NOT EXISTS database_instance_analysis_changelog (
- changelog_id bigint unsigned not null auto_increment,
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- analysis varchar(128) NOT NULL,
- PRIMARY KEY (changelog_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+DROP TABLE IF EXISTS database_instance_tls
+`,
`
- DROP INDEX analysis_timestamp_idx ON database_instance_analysis_changelog
- `,
+CREATE TABLE database_instance_tls (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ required tinyint NOT NULL DEFAULT 0,
+ PRIMARY KEY (hostname,port)
+)`,
`
- CREATE INDEX analysis_timestamp_idx_database_instance_analysis_changelog ON database_instance_analysis_changelog (analysis_timestamp)
- `,
+DROP TABLE IF EXISTS hostname_ips
+`,
`
- CREATE TABLE IF NOT EXISTS node_health_history (
- history_id bigint unsigned not null auto_increment,
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- token varchar(128) NOT NULL,
- first_seen_active timestamp NOT NULL,
- extra_info varchar(128) CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (history_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+CREATE TABLE hostname_ips (
+ hostname varchar(128) NOT NULL,
+ ipv4 varchar(128) NOT NULL,
+ ipv6 varchar(128) NOT NULL,
+ last_updated timestamp not null default (''),
+ PRIMARY KEY (hostname)
+)`,
`
- DROP INDEX first_seen_active_idx ON node_health_history
- `,
+DROP TABLE IF EXISTS database_instance_tags
+`,
`
- CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (first_seen_active)
- `,
+CREATE TABLE database_instance_tags (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ tag_name varchar(128) NOT NULL,
+ tag_value varchar(128) NOT NULL,
+ last_updated timestamp not null default (''),
+ PRIMARY KEY (hostname, port, tag_name)
+)`,
`
- DROP INDEX hostname_token_idx ON node_health_history
+CREATE INDEX tag_name_idx_database_instance_tags ON database_instance_tags (tag_name)
`,
`
- CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token)
- `,
+DROP TABLE IF EXISTS database_instance_stale_binlog_coordinates
+`,
`
- CREATE TABLE IF NOT EXISTS database_instance_coordinates_history (
- history_id bigint unsigned not null auto_increment,
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- recorded_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- binary_log_file varchar(128) NOT NULL,
- binary_log_pos bigint(20) unsigned NOT NULL,
- relay_log_file varchar(128) NOT NULL,
- relay_log_pos bigint(20) unsigned NOT NULL,
- PRIMARY KEY (history_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
+CREATE TABLE database_instance_stale_binlog_coordinates (
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ binary_log_file varchar(128) NOT NULL,
+ binary_log_pos bigint NOT NULL,
+ first_seen timestamp not null default (''),
+ PRIMARY KEY (hostname, port)
+)`,
`
- DROP INDEX hostname_port_recorded_timestmp_idx ON database_instance_coordinates_history
+CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen)
`,
`
- CREATE INDEX hostname_port_recorded_idx_database_instance_coordinates_history ON database_instance_coordinates_history (hostname, port, recorded_timestamp)
- `,
+DROP TABLE IF EXISTS vitess_tablet
+`,
`
- DROP INDEX recorded_timestmp_idx ON database_instance_coordinates_history
- `,
+CREATE TABLE vitess_tablet (
+ alias varchar(256) NOT NULL,
+ hostname varchar(128) NOT NULL,
+ port smallint NOT NULL,
+ keyspace varchar(128) NOT NULL,
+ shard varchar(128) NOT NULL,
+ cell varchar(128) NOT NULL,
+ tablet_type smallint(5) NOT NULL,
+ primary_timestamp timestamp NOT NULL,
+ info varchar(512) NOT NULL,
+ UNIQUE (alias),
+ PRIMARY KEY (hostname, port)
+)`,
`
- CREATE INDEX recorded_timestmp_idx_database_instance_coordinates_history ON database_instance_coordinates_history (recorded_timestamp)
+CREATE INDEX cell_idx_vitess_tablet ON vitess_tablet (cell)
`,
`
- CREATE TABLE IF NOT EXISTS database_instance_binlog_files_history (
- history_id bigint unsigned not null auto_increment,
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- binary_log_file varchar(128) NOT NULL,
- binary_log_pos bigint(20) unsigned NOT NULL,
- first_seen timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
- PRIMARY KEY (history_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX ks_idx_vitess_tablet ON vitess_tablet (keyspace, shard)
`,
`
- DROP INDEX hostname_port_file_idx ON database_instance_binlog_files_history
- `,
+DROP TABLE IF EXISTS vitess_keyspace
+`,
`
- CREATE UNIQUE INDEX hostname_port_file_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (hostname, port, binary_log_file)
- `,
- `
- DROP INDEX last_seen_idx ON database_instance_binlog_files_history
- `,
- `
- CREATE INDEX last_seen_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (last_seen)
- `,
- `
- CREATE TABLE IF NOT EXISTS database_instance_recent_relaylog_history (
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- current_relay_log_file varchar(128) NOT NULL,
- current_relay_log_pos bigint(20) unsigned NOT NULL,
- current_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
- prev_relay_log_file varchar(128) NOT NULL,
- prev_relay_log_pos bigint(20) unsigned NOT NULL,
- prev_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00',
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- DROP INDEX current_seen_idx ON database_instance_recent_relaylog_history
- `,
- `
- CREATE INDEX current_seen_idx_database_instance_recent_relaylog_history ON database_instance_recent_relaylog_history (current_seen)
- `,
- `
- CREATE TABLE IF NOT EXISTS vtorc_metadata (
- anchor tinyint unsigned NOT NULL,
- last_deployed_version varchar(128) CHARACTER SET ascii NOT NULL,
- last_deployed_timestamp timestamp NOT NULL,
- PRIMARY KEY (anchor)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- CREATE TABLE IF NOT EXISTS vtorc_db_deployments (
- deployed_version varchar(128) CHARACTER SET ascii NOT NULL,
- deployed_timestamp timestamp NOT NULL,
- PRIMARY KEY (deployed_version)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- CREATE TABLE IF NOT EXISTS global_recovery_disable (
- disable_recovery tinyint unsigned NOT NULL COMMENT 'Insert 1 to disable recovery globally',
- PRIMARY KEY (disable_recovery)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- CREATE TABLE IF NOT EXISTS topology_recovery_steps (
- recovery_step_id bigint unsigned not null auto_increment,
- recovery_uid varchar(128) CHARACTER SET ascii NOT NULL,
- audit_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- message text CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (recovery_step_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- CREATE TABLE IF NOT EXISTS raft_store (
- store_id bigint unsigned not null auto_increment,
- store_key varbinary(512) not null,
- store_value blob not null,
- PRIMARY KEY (store_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
- `,
- `
- CREATE INDEX store_key_idx_raft_store ON raft_store (store_key)
- `,
+CREATE TABLE vitess_keyspace (
+ keyspace varchar(128) NOT NULL,
+ keyspace_type smallint(5) NOT NULL,
+ durability_policy varchar(512) NOT NULL,
+ PRIMARY KEY (keyspace)
+)`,
`
- CREATE TABLE IF NOT EXISTS raft_log (
- log_index bigint unsigned not null auto_increment,
- term bigint not null,
- log_type int not null,
- data blob not null,
- PRIMARY KEY (log_index)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX source_host_port_idx_database_instance_database_instance on database_instance (source_host, source_port)
`,
`
- CREATE TABLE IF NOT EXISTS raft_snapshot (
- snapshot_id bigint unsigned not null auto_increment,
- snapshot_name varchar(128) CHARACTER SET utf8 NOT NULL,
- snapshot_meta varchar(4096) CHARACTER SET utf8 NOT NULL,
- PRIMARY KEY (snapshot_id)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX active_timestamp_idx_database_instance_maintenance on database_instance_maintenance (maintenance_active, begin_timestamp)
`,
`
- CREATE UNIQUE INDEX snapshot_name_uidx_raft_snapshot ON raft_snapshot (snapshot_name)
+CREATE INDEX active_end_timestamp_idx_database_instance_maintenance on database_instance_maintenance (maintenance_active, end_timestamp)
`,
`
- CREATE TABLE IF NOT EXISTS database_instance_peer_analysis (
- peer varchar(128) NOT NULL,
- hostname varchar(128) NOT NULL,
- port smallint(5) unsigned NOT NULL,
- analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- analysis varchar(128) NOT NULL,
- PRIMARY KEY (peer, hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX last_registered_idx_hostname_unresolve on hostname_unresolve (last_registered)
`,
`
- CREATE TABLE IF NOT EXISTS database_instance_tls (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- required tinyint unsigned NOT NULL DEFAULT 0,
- PRIMARY KEY (hostname,port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX keyspace_shard_in_active_idx_topology_recovery on topology_recovery (keyspace, shard, in_active_period)
`,
`
- CREATE TABLE IF NOT EXISTS cluster_injected_pseudo_gtid (
- cluster_name varchar(128) NOT NULL,
- time_injected timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (cluster_name)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX end_recovery_idx_topology_recovery on topology_recovery (end_recovery)
`,
`
- CREATE TABLE IF NOT EXISTS hostname_ips (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- ipv4 varchar(128) CHARACTER SET ascii NOT NULL,
- ipv6 varchar(128) CHARACTER SET ascii NOT NULL,
- last_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (hostname)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX acknowledged_idx_topology_recovery on topology_recovery (acknowledged, acknowledged_at)
`,
`
- CREATE TABLE IF NOT EXISTS database_instance_tags (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- tag_name varchar(128) CHARACTER SET utf8 NOT NULL,
- tag_value varchar(128) CHARACTER SET utf8 NOT NULL,
- last_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (hostname, port, tag_name)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX last_blocked_idx_blocked_topology_recovery on blocked_topology_recovery (last_blocked_timestamp)
`,
`
- CREATE INDEX tag_name_idx_database_instance_tags ON database_instance_tags (tag_name)
+CREATE INDEX instance_timestamp_idx_database_instance_analysis_changelog on database_instance_analysis_changelog (hostname, port, analysis_timestamp)
`,
`
- CREATE TABLE IF NOT EXISTS database_instance_stale_binlog_coordinates (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- binary_log_file varchar(128) NOT NULL,
- binary_log_pos bigint(20) unsigned NOT NULL,
- first_seen timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX last_detection_idx_topology_recovery on topology_recovery (last_detection_id)
`,
`
- CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen)
+CREATE INDEX last_seen_active_idx_node_health on node_health (last_seen_active)
`,
`
- CREATE TABLE IF NOT EXISTS vitess_tablet (
- hostname varchar(128) CHARACTER SET ascii NOT NULL,
- port smallint(5) unsigned NOT NULL,
- keyspace varchar(128) CHARACTER SET ascii NOT NULL,
- shard varchar(128) CHARACTER SET ascii NOT NULL,
- cell varchar(128) CHARACTER SET ascii NOT NULL,
- tablet_type smallint(5) NOT NULL,
- primary_timestamp timestamp NOT NULL,
- info varchar(512) CHARACTER SET ascii NOT NULL,
- PRIMARY KEY (hostname, port)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid)
`,
`
- CREATE INDEX cell_idx_vitess_tablet ON vitess_tablet (cell)
+CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid)
`,
`
- CREATE INDEX ks_idx_vitess_tablet ON vitess_tablet (keyspace, shard)
+CREATE INDEX end_timestamp_idx_database_instance_downtime ON database_instance_downtime(end_timestamp)
`,
`
- CREATE TABLE IF NOT EXISTS vitess_keyspace (
- keyspace varchar(128) CHARACTER SET ascii NOT NULL,
- keyspace_type smallint(5) NOT NULL,
- durability_policy varchar(512) CHARACTER SET ascii NOT NULL,
- PRIMARY KEY (keyspace)
- ) ENGINE=InnoDB DEFAULT CHARSET=ascii
+CREATE UNIQUE INDEX host_port_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (hostname, port, in_active_period, end_active_period_unixtime, is_actionable)
`,
}
diff --git a/go/vt/vtorc/db/generate_patches.go b/go/vt/vtorc/db/generate_patches.go
deleted file mode 100644
index 93099f51a57..00000000000
--- a/go/vt/vtorc/db/generate_patches.go
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- Copyright 2017 Shlomi Noach, GitHub Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package db
-
-// generateSQLPatches contains DDLs for patching schema to the latest version.
-// Add new statements at the end of the list so they form a changelog.
-var generateSQLPatches = []string{
- `
- ALTER TABLE
- database_instance
- ADD COLUMN read_only TINYINT UNSIGNED NOT NULL AFTER version
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN last_sql_error TEXT NOT NULL AFTER exec_source_log_pos
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN last_io_error TEXT NOT NULL AFTER last_sql_error
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN oracle_gtid TINYINT UNSIGNED NOT NULL AFTER replica_io_running
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN mariadb_gtid TINYINT UNSIGNED NOT NULL AFTER oracle_gtid
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN relay_log_file varchar(128) CHARACTER SET ascii NOT NULL AFTER exec_source_log_pos
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN relay_log_pos bigint unsigned NOT NULL AFTER relay_log_file
- `,
- `
- DROP INDEX source_host_port_idx ON database_instance
- `,
- `
- ALTER TABLE
- database_instance
- ADD INDEX source_host_port_idx_database_instance (source_host, source_port)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN pseudo_gtid TINYINT UNSIGNED NOT NULL AFTER mariadb_gtid
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_depth TINYINT UNSIGNED NOT NULL AFTER cluster_name
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN has_replication_filters TINYINT UNSIGNED NOT NULL AFTER replica_io_running
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN data_center varchar(32) CHARACTER SET ascii NOT NULL AFTER cluster_name
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN physical_environment varchar(32) CHARACTER SET ascii NOT NULL AFTER data_center
- `,
- `
- ALTER TABLE
- database_instance_maintenance
- ADD KEY active_timestamp_idx (maintenance_active, begin_timestamp)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN is_co_primary TINYINT UNSIGNED NOT NULL AFTER replication_depth
- `,
- `
- ALTER TABLE
- database_instance_maintenance
- ADD KEY active_end_timestamp_idx (maintenance_active, end_timestamp)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER replica_lag_seconds
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN analysis varchar(128) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN cluster_name varchar(128) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN count_affected_replicas int unsigned NOT NULL
- `,
- `
- ALTER TABLE hostname_unresolve
- ADD COLUMN last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
- `
- ALTER TABLE hostname_unresolve
- ADD KEY last_registered_idx (last_registered)
- `,
- `
- ALTER TABLE topology_recovery
- ADD KEY cluster_name_in_active_idx (cluster_name, in_active_period)
- `,
- `
- ALTER TABLE topology_recovery
- ADD KEY end_recovery_idx (end_recovery)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN binlog_server TINYINT UNSIGNED NOT NULL AFTER version
- `,
- `
- ALTER TABLE cluster_domain_name
- ADD COLUMN last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
- `
- ALTER TABLE cluster_domain_name
- ADD KEY last_registered_idx (last_registered)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN supports_oracle_gtid TINYINT UNSIGNED NOT NULL AFTER oracle_gtid
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN executed_gtid_set text CHARACTER SET ascii NOT NULL AFTER oracle_gtid
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN server_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER server_id
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN is_successful TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER processcing_node_token
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN acknowledged TINYINT UNSIGNED NOT NULL DEFAULT 0
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN acknowledged_by varchar(128) CHARACTER SET utf8 NOT NULL
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN acknowledge_comment text CHARACTER SET utf8 NOT NULL
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN participating_instances text CHARACTER SET ascii NOT NULL after count_affected_replicas
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN lost_replicas text CHARACTER SET ascii NOT NULL after participating_instances
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN all_errors text CHARACTER SET ascii NOT NULL after lost_replicas
- `,
- `
- ALTER TABLE audit
- ADD COLUMN cluster_name varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER port
- `,
- `
- ALTER TABLE candidate_database_instance
- ADD COLUMN priority TINYINT SIGNED NOT NULL DEFAULT 1 comment 'positive promote, nagative unpromotes'
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN acknowledged_at TIMESTAMP NULL after acknowledged
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD KEY acknowledged_idx (acknowledged, acknowledged_at)
- `,
- `
- ALTER TABLE
- blocked_topology_recovery
- ADD KEY last_blocked_idx (last_blocked_timestamp)
- `,
- `
- ALTER TABLE candidate_database_instance
- ADD COLUMN promotion_rule enum('must', 'prefer', 'neutral', 'prefer_not', 'must_not') NOT NULL DEFAULT 'neutral'
- `,
- `
- ALTER TABLE node_health /* sqlite3-skip */
- DROP PRIMARY KEY,
- ADD PRIMARY KEY (hostname, token)
- `,
- `
- ALTER TABLE node_health
- ADD COLUMN extra_info varchar(128) CHARACTER SET utf8 NOT NULL
- `,
- `
- ALTER TABLE agent_seed /* sqlite3-skip */
- MODIFY end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00'
- `,
- `
- ALTER TABLE active_node /* sqlite3-skip */
- MODIFY last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
-
- `
- ALTER TABLE node_health /* sqlite3-skip */
- MODIFY last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
- `
- ALTER TABLE candidate_database_instance /* sqlite3-skip */
- MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
- `
- ALTER TABLE primary_position_equivalence /* sqlite3-skip */
- MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00' AFTER last_checked
- `,
- `
- ALTER TABLE
- database_instance /* sqlite3-skip */
- MODIFY last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00'
- `,
- `
- ALTER TABLE
- database_instance_analysis_changelog
- ADD KEY instance_timestamp_idx (hostname, port, analysis_timestamp)
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN last_detection_id bigint unsigned NOT NULL
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD KEY last_detection_idx (last_detection_id)
- `,
- `
- ALTER TABLE node_health_history
- ADD COLUMN command varchar(128) CHARACTER SET utf8 NOT NULL
- `,
- `
- ALTER TABLE node_health
- ADD COLUMN command varchar(128) CHARACTER SET utf8 NOT NULL
- `,
- `
- ALTER TABLE database_instance_topology_history
- ADD COLUMN version varchar(128) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN gtid_purged text CHARACTER SET ascii NOT NULL AFTER executed_gtid_set
- `,
- `
- ALTER TABLE
- database_instance_coordinates_history
- ADD COLUMN last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' AFTER recorded_timestamp
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN has_replication_credentials TINYINT UNSIGNED NOT NULL
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN allow_tls TINYINT UNSIGNED NOT NULL AFTER sql_delay
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_enforced TINYINT UNSIGNED NOT NULL AFTER physical_environment
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN instance_alias varchar(128) CHARACTER SET ascii NOT NULL AFTER physical_environment
- `,
- `
- ALTER TABLE
- topology_recovery
- ADD COLUMN successor_alias varchar(128) DEFAULT NULL
- `,
- `
- ALTER TABLE
- database_instance /* sqlite3-skip */
- MODIFY cluster_name varchar(128) NOT NULL
- `,
- `
- ALTER TABLE
- node_health
- ADD INDEX last_seen_active_idx (last_seen_active)
- `,
- `
- ALTER TABLE
- database_instance_maintenance
- ADD COLUMN processing_node_hostname varchar(128) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE
- database_instance_maintenance
- ADD COLUMN processing_node_token varchar(128) NOT NULL
- `,
- `
- ALTER TABLE
- database_instance_maintenance
- ADD COLUMN explicitly_bounded TINYINT UNSIGNED NOT NULL
- `,
- `
- ALTER TABLE node_health_history
- ADD COLUMN app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT ""
- `,
- `
- ALTER TABLE node_health
- ADD COLUMN app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT ""
- `,
- `
- ALTER TABLE node_health_history /* sqlite3-skip */
- MODIFY app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT ""
- `,
- `
- ALTER TABLE node_health /* sqlite3-skip */
- MODIFY app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT ""
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN version_comment varchar(128) NOT NULL DEFAULT ''
- `,
- `
- ALTER TABLE active_node
- ADD COLUMN first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00'
- `,
- `
- ALTER TABLE node_health
- ADD COLUMN first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00'
- `,
- `
- ALTER TABLE database_instance
- ADD COLUMN major_version varchar(16) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN binlog_row_image varchar(16) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE topology_recovery
- ADD COLUMN uid varchar(128) CHARACTER SET ascii NOT NULL
- `,
- `
- CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid)
- `,
- `
- CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN last_discovery_latency bigint not null
- `,
- `
- CREATE INDEX end_timestamp_idx_database_instance_downtime ON database_instance_downtime(end_timestamp)
- `,
- `
- ALTER TABLE
- topology_failure_detection
- ADD COLUMN is_actionable tinyint not null default 0
- `,
- `
- DROP INDEX hostname_port_active_period_uidx_topology_failure_detection ON topology_failure_detection
- `,
- `
- CREATE UNIQUE INDEX host_port_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (hostname, port, in_active_period, end_active_period_unixtime, is_actionable)
- `,
- `
- ALTER TABLE raft_snapshot
- ADD COLUMN created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
- `,
- `
- ALTER TABLE node_health
- ADD COLUMN db_backend varchar(255) CHARACTER SET ascii NOT NULL DEFAULT ""
- `,
- `
- ALTER TABLE node_health
- ADD COLUMN incrementing_indicator bigint not null default 0
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_primary_enabled TINYINT UNSIGNED NOT NULL
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_replica_enabled TINYINT UNSIGNED NOT NULL
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN gtid_mode varchar(32) CHARACTER SET ascii NOT NULL
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN last_check_partial_success tinyint unsigned NOT NULL after last_attempted_check
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN source_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER oracle_gtid
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN gtid_errant text CHARACTER SET ascii NOT NULL AFTER gtid_purged
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN ancestry_uuid text CHARACTER SET ascii NOT NULL AFTER source_uuid
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_sql_thread_state tinyint signed not null default 0 AFTER replica_io_running
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_io_thread_state tinyint signed not null default 0 AFTER replication_sql_thread_state
- `,
- `
- ALTER TABLE
- database_instance_tags /* sqlite3-skip */
- DROP PRIMARY KEY,
- ADD PRIMARY KEY (hostname, port, tag_name)
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN region varchar(32) CHARACTER SET ascii NOT NULL AFTER data_center
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_primary_timeout INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_enabled
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_primary_wait_for_replica_count INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_timeout
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_primary_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_wait_for_replica_count
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_replica_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN semi_sync_primary_clients INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status
- `,
- `
- ALTER TABLE /* sqlite3-skip */
- database_instance
- MODIFY semi_sync_primary_timeout BIGINT UNSIGNED NOT NULL DEFAULT 0
- `,
- // Fields related to Replication Group the instance belongs to
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_name VARCHAR(64) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER gtid_mode
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_is_single_primary_mode TINYINT UNSIGNED NOT NULL DEFAULT 1 AFTER replication_group_name
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_member_state VARCHAR(16) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_is_single_primary_mode
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_member_role VARCHAR(16) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_member_state
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_members text CHARACTER SET ascii NOT NULL AFTER replication_group_member_role
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_primary_host varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_members
- `,
- `
- ALTER TABLE
- database_instance
- ADD COLUMN replication_group_primary_port smallint(5) unsigned NOT NULL DEFAULT 0 AFTER replication_group_primary_host
- `,
-}
diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go
index 657bb6ddea3..21ffdbe4fe8 100644
--- a/go/vt/vtorc/inst/analysis_dao.go
+++ b/go/vt/vtorc/inst/analysis_dao.go
@@ -20,6 +20,7 @@ import (
"fmt"
"time"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"google.golang.org/protobuf/encoding/prototext"
@@ -34,8 +35,6 @@ import (
"github.com/patrickmn/go-cache"
"github.com/rcrowley/go-metrics"
-
- "github.com/openark/golib/sqlutils"
)
var analysisChangeWriteAttemptCounter = metrics.NewCounter()
@@ -63,11 +62,11 @@ type clusterAnalysis struct {
}
// GetReplicationAnalysis will check for replication problems (dead primary; unreachable primary; etc)
-func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints) ([]ReplicationAnalysis, error) {
+func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAnalysisHints) ([]ReplicationAnalysis, error) {
result := []ReplicationAnalysis{}
// TODO(sougou); deprecate ReduceReplicationAnalysisCount
- args := sqlutils.Args(config.Config.ReasonableReplicationLagSeconds, ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, clusterName)
+ args := sqlutils.Args(config.Config.ReasonableReplicationLagSeconds, ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, keyspace, shard)
query := `
SELECT
vitess_tablet.info AS tablet_info,
@@ -80,12 +79,12 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
vitess_keyspace.keyspace_type AS keyspace_type,
vitess_keyspace.durability_policy AS durability_policy,
primary_instance.read_only AS read_only,
+ MIN(primary_instance.hostname) IS NULL AS is_invalid,
MIN(primary_instance.data_center) AS data_center,
MIN(primary_instance.region) AS region,
MIN(primary_instance.physical_environment) AS physical_environment,
MIN(primary_instance.source_host) AS source_host,
MIN(primary_instance.source_port) AS source_port,
- MIN(primary_instance.cluster_name) AS cluster_name,
MIN(primary_instance.binary_log_file) AS binary_log_file,
MIN(primary_instance.binary_log_pos) AS binary_log_pos,
MIN(primary_tablet.info) AS primary_tablet_info,
@@ -97,12 +96,6 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
0
)
) AS is_stale_binlog_coordinates,
- MIN(
- IFNULL(
- cluster_domain_name.domain_name,
- primary_instance.cluster_name
- )
- ) AS cluster_domain,
MIN(
primary_instance.last_checked <= primary_instance.last_seen
and primary_instance.last_attempted_check <= primary_instance.last_seen + interval ? second
@@ -295,7 +288,7 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
JOIN vitess_keyspace ON (
vitess_tablet.keyspace = vitess_keyspace.keyspace
)
- JOIN database_instance primary_instance ON (
+ LEFT JOIN database_instance primary_instance ON (
vitess_tablet.hostname = primary_instance.hostname
AND vitess_tablet.port = primary_instance.port
)
@@ -332,12 +325,10 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
AND replica_instance.port = replica_downtime.port
AND replica_downtime.downtime_active = 1
)
- LEFT JOIN cluster_domain_name ON (
- cluster_domain_name.cluster_name = primary_instance.cluster_name
- )
WHERE
database_instance_maintenance.database_instance_maintenance_id IS NULL
- AND ? IN ('', primary_instance.cluster_name)
+ AND ? IN ('', vitess_keyspace.keyspace)
+ AND ? IN ('', vitess_tablet.shard)
GROUP BY
vitess_tablet.hostname,
vitess_tablet.port
@@ -392,8 +383,8 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
Type: BinaryLog,
}
isStaleBinlogCoordinates := m.GetBool("is_stale_binlog_coordinates")
- a.ClusterDetails.ClusterName = m.GetString("cluster_name")
- a.ClusterDetails.ClusterDomain = m.GetString("cluster_domain")
+ a.ClusterDetails.Keyspace = m.GetString("keyspace")
+ a.ClusterDetails.Shard = m.GetString("shard")
a.GTIDMode = m.GetString("gtid_mode")
a.LastCheckValid = m.GetBool("is_last_check_valid")
a.LastCheckPartialSuccess = m.GetBool("last_check_partial_success")
@@ -441,18 +432,19 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
a.IsReadOnly = m.GetUint("read_only") == 1
if !a.LastCheckValid {
- analysisMessage := fmt.Sprintf("analysis: ClusterName: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v",
- a.ClusterDetails.ClusterName, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary,
+ analysisMessage := fmt.Sprintf("analysis: Keyspace: %+v, Shard: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v",
+ a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary,
)
if util.ClearToLog("analysis_dao", analysisMessage) {
log.Infof(analysisMessage)
}
}
- if clusters[a.ClusterDetails.ClusterName] == nil {
- clusters[a.ClusterDetails.ClusterName] = &clusterAnalysis{}
+ keyspaceShard := getKeyspaceShardName(a.ClusterDetails.Keyspace, a.ClusterDetails.Shard)
+ if clusters[keyspaceShard] == nil {
+ clusters[keyspaceShard] = &clusterAnalysis{}
if a.TabletType == topodatapb.TabletType_PRIMARY {
a.IsClusterPrimary = true
- clusters[a.ClusterDetails.ClusterName].primaryKey = &a.AnalyzedInstanceKey
+ clusters[keyspaceShard].primaryKey = &a.AnalyzedInstanceKey
}
durabilityPolicy := m.GetString("durability_policy")
if durabilityPolicy == "" {
@@ -464,10 +456,10 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
log.Errorf("can't get the durability policy %v - %v. Skipping keyspace - %v.", durabilityPolicy, err, a.AnalyzedKeyspace)
return nil
}
- clusters[a.ClusterDetails.ClusterName].durability = durability
+ clusters[keyspaceShard].durability = durability
}
// ca has clusterwide info
- ca := clusters[a.ClusterDetails.ClusterName]
+ ca := clusters[keyspaceShard]
if ca.hasClusterwideAction {
// We can only take one cluster level action at a time.
return nil
@@ -476,6 +468,10 @@ func GetReplicationAnalysis(clusterName string, hints *ReplicationAnalysisHints)
// We failed to load the durability policy, so we shouldn't run any analysis
return nil
}
+ isInvalid := m.GetBool("is_invalid")
+ if isInvalid {
+ return nil
+ }
if a.IsClusterPrimary && !a.LastCheckValid && a.CountReplicas == 0 {
a.Analysis = DeadPrimaryWithoutReplicas
a.Description = "Primary cannot be reached by vtorc and has no replica"
diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go
index a705530a585..480986e34ba 100644
--- a/go/vt/vtorc/inst/analysis_dao_test.go
+++ b/go/vt/vtorc/inst/analysis_dao_test.go
@@ -21,14 +21,17 @@ import (
"github.com/stretchr/testify/require"
- "github.com/openark/golib/sqlutils"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/vtorc/db"
"vitess.io/vitess/go/vt/vtorc/test"
)
-func TestGetReplicationAnalysis(t *testing.T) {
+// TestGetReplicationAnalysisDecision tests the code of GetReplicationAnalysis decision-making. It doesn't check the SQL query
+// run by it. It only checks the analysis part after the rows have been read. This tests fakes the db and explicitly returns the
+// rows that are specified in the test.
+func TestGetReplicationAnalysisDecision(t *testing.T) {
tests := []struct {
name string
info []*test.InfoForRecoveryAnalysis
@@ -519,10 +522,54 @@ func TestGetReplicationAnalysis(t *testing.T) {
keyspaceWanted: "ks",
shardWanted: "0",
codeWanted: NoProblem,
+ }, {
+ // If the database_instance table for a tablet is empty (discovery of MySQL information hasn't happened yet or failed)
+ // then we shouldn't run a failure fix on it until the discovery succeeds
+ name: "Empty database_instance table",
+ info: []*test.InfoForRecoveryAnalysis{{
+ TabletInfo: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101},
+ Hostname: "localhost",
+ Keyspace: "ks",
+ Shard: "0",
+ Type: topodatapb.TabletType_PRIMARY,
+ MysqlHostname: "localhost",
+ MysqlPort: 6708,
+ },
+ DurabilityPolicy: "semi_sync",
+ LastCheckValid: 1,
+ CountReplicas: 4,
+ CountValidReplicas: 4,
+ CountValidReplicatingReplicas: 3,
+ CountValidOracleGTIDReplicas: 4,
+ CountLoggingReplicas: 2,
+ IsPrimary: 1,
+ SemiSyncPrimaryEnabled: 1,
+ }, {
+ TabletInfo: &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100},
+ Hostname: "localhost",
+ Keyspace: "ks",
+ Shard: "0",
+ Type: topodatapb.TabletType_REPLICA,
+ MysqlHostname: "localhost",
+ MysqlPort: 6709,
+ },
+ IsInvalid: 1,
+ DurabilityPolicy: "semi_sync",
+ }},
+ keyspaceWanted: "ks",
+ shardWanted: "0",
+ codeWanted: NoProblem,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
+ oldDB := db.Db
+ defer func() {
+ db.Db = oldDB
+ }()
+
var rowMaps []sqlutils.RowMap
for _, analysis := range tt.info {
analysis.SetValuesFromTabletInfo()
@@ -530,7 +577,7 @@ func TestGetReplicationAnalysis(t *testing.T) {
}
db.Db = test.NewTestDB([][]sqlutils.RowMap{rowMaps})
- got, err := GetReplicationAnalysis("", &ReplicationAnalysisHints{})
+ got, err := GetReplicationAnalysis("", "", &ReplicationAnalysisHints{})
if tt.wantErr != "" {
require.EqualError(t, err, tt.wantErr)
return
@@ -547,3 +594,94 @@ func TestGetReplicationAnalysis(t *testing.T) {
})
}
}
+
+// TestGetReplicationAnalysis tests the entire GetReplicationAnalysis. It inserts data into the database and runs the function.
+// The database is not faked. This is intended to give more test coverage. This test is more comprehensive but more expensive than TestGetReplicationAnalysisDecision.
+// This test is somewhere between a unit test, and an end-to-end test. It is specifically useful for testing situations which are hard to come by in end-to-end test, but require
+// real-world data to test specifically.
+func TestGetReplicationAnalysis(t *testing.T) {
+ // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here.
+ // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica.
+ initialSQL := []string{
+ `INSERT INTO database_instance VALUES('localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'zone1-0000000112','Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0,'',0,'','','[]','',0);`,
+ `INSERT INTO database_instance VALUES('localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'zone1-0000000100','Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0,'',0,'','','[]','',0);`,
+ `INSERT INTO database_instance VALUES('localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'zone1-0000000101','Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2,'',0,'','','[]','',0);`,
+ `INSERT INTO database_instance VALUES('localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'zone2-0000000200','Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0,'',0,'','','[]','',0);`,
+ `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
+ `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
+ `INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
+ `INSERT INTO vitess_tablet VALUES('zone2-0000000200','localhost',6756,'ks','0','zone2',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653222207569643a3230307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363735357d20706f72745f6d61703a7b6b65793a227674222076616c75653a363735347d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363735362064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
+ `INSERT INTO vitess_keyspace VALUES('ks',0,'semi_sync');`,
+ }
+
+ // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state.
+ tests := []struct {
+ name string
+ sql []string
+ codeWanted AnalysisCode
+ shardWanted string
+ keyspaceWanted string
+ }{
+ {
+ name: "No additions",
+ sql: nil,
+ codeWanted: NoProblem,
+ }, {
+ name: "Removing Primary Tablet's Vitess record",
+ sql: []string{
+ // This query removes the primary tablet's vitess_tablet record
+ `delete from vitess_tablet where port = 6714`,
+ },
+ codeWanted: ClusterHasNoPrimary,
+ keyspaceWanted: "ks",
+ shardWanted: "0",
+ }, {
+ name: "Removing Primary Tablet's MySQL record",
+ sql: []string{
+ // This query removes the primary tablet's database_instance record
+ `delete from database_instance where port = 6714`,
+ },
+ // As long as we have the vitess record stating that this tablet is the primary
+ // It would be incorrect to run a PRS.
+ // This situation only happens when we haven't been able to read the MySQL information even once for this tablet.
+ // So it is likely a new tablet.
+ codeWanted: NoProblem,
+ }, {
+ name: "Removing Replica Tablet's MySQL record",
+ sql: []string{
+ // This query removes the replica tablet's database_instance record
+ `delete from database_instance where port = 6711`,
+ },
+ // As long as we don't have the MySQL information, we shouldn't do anything.
+ // We should wait for the MySQL information to be refreshed once.
+ // This situation only happens when we haven't been able to read the MySQL information even once for this tablet.
+ // So it is likely a new tablet.
+ codeWanted: NoProblem,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Each test should clear the database. The easiest way to do that is to run all the initialization commands again
+ defer func() {
+ db.ClearVTOrcDatabase()
+ }()
+
+ for _, query := range append(initialSQL, tt.sql...) {
+ _, err := db.ExecVTOrc(query)
+ require.NoError(t, err)
+ }
+
+ got, err := GetReplicationAnalysis("", "", &ReplicationAnalysisHints{})
+ require.NoError(t, err)
+ if tt.codeWanted == NoProblem {
+ require.Len(t, got, 0)
+ return
+ }
+ require.Len(t, got, 1)
+ require.Equal(t, tt.codeWanted, got[0].Analysis)
+ require.Equal(t, tt.keyspaceWanted, got[0].AnalyzedKeyspace)
+ require.Equal(t, tt.shardWanted, got[0].AnalyzedShard)
+ })
+ }
+}
diff --git a/go/vt/vtorc/inst/audit_dao.go b/go/vt/vtorc/inst/audit_dao.go
index 36c690d2513..7882449c655 100644
--- a/go/vt/vtorc/inst/audit_dao.go
+++ b/go/vt/vtorc/inst/audit_dao.go
@@ -22,12 +22,11 @@ import (
"os"
"time"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"github.com/rcrowley/go-metrics"
- "github.com/openark/golib/sqlutils"
-
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
)
@@ -55,9 +54,10 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string)
if instanceKey == nil {
instanceKey = &InstanceKey{}
}
- clusterName := ""
+ keyspace := ""
+ shard := ""
if instanceKey.Hostname != "" {
- clusterName, _ = GetClusterName(instanceKey)
+ keyspace, shard, _ = GetKeyspaceShardName(instanceKey)
}
auditWrittenToFile := false
@@ -71,7 +71,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string)
}
defer f.Close()
- text := fmt.Sprintf("%s\t%s\t%s\t%d\t[%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, instanceKey.Hostname, instanceKey.Port, clusterName, message)
+ text := fmt.Sprintf("%s\t%s\t%s\t%d\t[%s:%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, instanceKey.Hostname, instanceKey.Port, keyspace, shard, message)
if _, err = f.WriteString(text); err != nil {
log.Error(err)
}
@@ -81,15 +81,16 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string)
_, err := db.ExecVTOrc(`
insert
into audit (
- audit_timestamp, audit_type, hostname, port, cluster_name, message
+ audit_timestamp, audit_type, hostname, port, keyspace, shard, message
) VALUES (
- NOW(), ?, ?, ?, ?, ?
+ NOW(), ?, ?, ?, ?, ?, ?
)
`,
auditType,
instanceKey.Hostname,
instanceKey.Port,
- clusterName,
+ keyspace,
+ shard,
message,
)
if err != nil {
@@ -97,7 +98,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string)
return err
}
}
- logMessage := fmt.Sprintf("auditType:%s instance:%s cluster:%s message:%s", auditType, instanceKey.DisplayString(), clusterName, message)
+ logMessage := fmt.Sprintf("auditType:%s instance:%s keyspace:%s shard:%s message:%s", auditType, instanceKey.DisplayString(), keyspace, shard, message)
if syslogWriter != nil {
auditWrittenToFile = true
go func() {
diff --git a/go/vt/vtorc/inst/audit_dao_test.go b/go/vt/vtorc/inst/audit_dao_test.go
new file mode 100644
index 00000000000..4a6533077c2
--- /dev/null
+++ b/go/vt/vtorc/inst/audit_dao_test.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package inst
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/vtorc/config"
+ "vitess.io/vitess/go/vt/vtorc/db"
+)
+
+// TestAuditOperation tests that auditing a operation works as intended based on the configurations.
+func TestAuditOperation(t *testing.T) {
+ // Restore original configurations
+ originalAuditSysLog := config.Config.AuditToSyslog
+ originalAuditLogFile := config.Config.AuditLogFile
+ originalAuditBackend := config.Config.AuditToBackendDB
+ defer func() {
+ config.Config.AuditToSyslog = originalAuditSysLog
+ config.Config.AuditLogFile = originalAuditLogFile
+ config.Config.AuditToBackendDB = originalAuditBackend
+ }()
+
+ orcDb, err := db.OpenVTOrc()
+ require.NoError(t, err)
+ defer func() {
+ _, err = orcDb.Exec("delete from audit")
+ require.NoError(t, err)
+ _, err = orcDb.Exec("delete from vitess_tablet")
+ require.NoError(t, err)
+ }()
+
+ // Store a tablet in the database
+ ks := "ks"
+ shard := "0"
+ hostname := "localhost"
+ var port int32 = 100
+ tab100 := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone-1",
+ Uid: 100,
+ },
+ Hostname: hostname,
+ Keyspace: ks,
+ Shard: shard,
+ Type: topodatapb.TabletType_PRIMARY,
+ MysqlHostname: hostname,
+ MysqlPort: port,
+ }
+ err = SaveTablet(tab100)
+ require.NoError(t, err)
+
+ instance100 := &InstanceKey{
+ Hostname: hostname,
+ Port: int(port),
+ }
+ auditType := "test-audit-operation"
+ message := "test-message"
+
+ t.Run("Audit to backend", func(t *testing.T) {
+ config.Config.AuditLogFile = ""
+ config.Config.AuditToSyslog = false
+ config.Config.AuditToBackendDB = true
+
+ // Auditing should succeed as expected
+ err = AuditOperation(auditType, instance100, message)
+ require.NoError(t, err)
+
+ audits, err := ReadRecentAudit(instance100, 0)
+ require.NoError(t, err)
+ require.Len(t, audits, 1)
+ require.EqualValues(t, 1, audits[0].AuditID)
+ require.EqualValues(t, auditType, audits[0].AuditType)
+ require.EqualValues(t, message, audits[0].Message)
+ require.EqualValues(t, *instance100, audits[0].AuditInstanceKey)
+ })
+
+ t.Run("Audit to File", func(t *testing.T) {
+ config.Config.AuditToBackendDB = false
+ config.Config.AuditToSyslog = false
+
+ file, err := os.CreateTemp("", "test-auditing-*")
+ require.NoError(t, err)
+ defer os.Remove(file.Name())
+ config.Config.AuditLogFile = file.Name()
+
+ err = AuditOperation(auditType, instance100, message)
+ require.NoError(t, err)
+
+ // Give a little time for the write to succeed since it happens in a separate go-routine
+ // There is no way to wait for that write to complete. This sleep is required to prevent this test from
+ // becoming flaky wherein we sometimes read the file before the contents are written.
+ time.Sleep(100 * time.Millisecond)
+ fileContent, err := os.ReadFile(file.Name())
+ require.NoError(t, err)
+ require.Contains(t, string(fileContent), "\ttest-audit-operation\tlocalhost\t100\t[ks:0]\ttest-message")
+ })
+}
diff --git a/go/vt/vtorc/inst/candidate_database_instance_dao.go b/go/vt/vtorc/inst/candidate_database_instance_dao.go
index b9883626eca..95bbb53f617 100644
--- a/go/vt/vtorc/inst/candidate_database_instance_dao.go
+++ b/go/vt/vtorc/inst/candidate_database_instance_dao.go
@@ -17,10 +17,8 @@
package inst
import (
- "github.com/openark/golib/sqlutils"
-
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
-
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
)
diff --git a/go/vt/vtorc/inst/cluster.go b/go/vt/vtorc/inst/cluster.go
index 805b25d7af4..c3a77485e74 100644
--- a/go/vt/vtorc/inst/cluster.go
+++ b/go/vt/vtorc/inst/cluster.go
@@ -18,8 +18,8 @@ package inst
// ClusterInfo makes for a cluster status/info summary
type ClusterInfo struct {
- ClusterName string
- ClusterDomain string // CNAME/VIP/A-record/whatever of the primary of this cluster
+ Keyspace string
+ Shard string
CountInstances uint
HeuristicLag int64
HasAutomatedPrimaryRecovery bool
diff --git a/go/vt/vtorc/inst/cluster_domain_dao.go b/go/vt/vtorc/inst/cluster_domain_dao.go
deleted file mode 100644
index 45aed648be1..00000000000
--- a/go/vt/vtorc/inst/cluster_domain_dao.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- Copyright 2015 Shlomi Noach, courtesy Booking.com
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package inst
-
-import (
- "vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/vtorc/config"
- "vitess.io/vitess/go/vt/vtorc/db"
-)
-
-// WriteClusterDomainName will write (and override) the domain name of a cluster
-func WriteClusterDomainName(clusterName string, domainName string) error {
- writeFunc := func() error {
- _, err := db.ExecVTOrc(`
- insert into
- cluster_domain_name (cluster_name, domain_name, last_registered)
- values
- (?, ?, NOW())
- on duplicate key update
- domain_name=values(domain_name),
- last_registered=values(last_registered)
- `,
- clusterName, domainName)
- if err != nil {
- log.Error(err)
- }
- return err
- }
- return ExecDBWriteFunc(writeFunc)
-}
-
-// ExpireClusterDomainName expires cluster_domain_name entries that haven't been updated recently.
-func ExpireClusterDomainName() error {
- writeFunc := func() error {
- _, err := db.ExecVTOrc(`
- delete from cluster_domain_name
- where last_registered < NOW() - INTERVAL ? MINUTE
- `, config.ExpiryHostnameResolvesMinutes,
- )
- if err != nil {
- log.Error(err)
- }
- return err
- }
- return ExecDBWriteFunc(writeFunc)
-}
diff --git a/go/vt/vtorc/inst/downtime_dao.go b/go/vt/vtorc/inst/downtime_dao.go
index 1d5c33873b5..53b12e325e8 100644
--- a/go/vt/vtorc/inst/downtime_dao.go
+++ b/go/vt/vtorc/inst/downtime_dao.go
@@ -135,7 +135,7 @@ func renewLostInRecoveryDowntime() error {
// expireLostInRecoveryDowntime expires downtime for servers who have been lost in recovery in the last,
// but are now replicating.
func expireLostInRecoveryDowntime() error {
- instances, err := ReadLostInRecoveryInstances("")
+ instances, err := ReadLostInRecoveryInstances("", "")
if err != nil {
return err
}
diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go
index d4b8c7bfe88..dd1526ff090 100644
--- a/go/vt/vtorc/inst/instance.go
+++ b/go/vt/vtorc/inst/instance.go
@@ -74,7 +74,6 @@ type Instance struct {
primaryExecutedGtidSet string // Not exported
ReplicationLagSeconds sql.NullInt64
- ClusterName string
DataCenter string
Region string
PhysicalEnvironment string
diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go
index 455319781b4..a799e4e3cb4 100644
--- a/go/vt/vtorc/inst/instance_dao.go
+++ b/go/vt/vtorc/inst/instance_dao.go
@@ -31,7 +31,7 @@ import (
"github.com/rcrowley/go-metrics"
"github.com/sjmudd/stopwatch"
- "github.com/openark/golib/sqlutils"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
vitessmysql "vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/tb"
@@ -70,8 +70,6 @@ const (
GroupReplicationMemberStateError = "ERROR"
)
-// instanceKeyInformativeClusterName is a non-authoritative cache; used for auditing or general purpose.
-var instanceKeyInformativeClusterName *cache.Cache
var forgetInstanceKeys *cache.Cache
var accessDeniedCounter = metrics.NewCounter()
@@ -96,7 +94,6 @@ func init() {
func initializeInstanceDao() {
config.WaitForConfigurationToBeLoaded()
- instanceKeyInformativeClusterName = cache.New(time.Duration(config.Config.InstancePollSeconds/2)*time.Second, time.Second)
forgetInstanceKeys = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second)
}
@@ -223,7 +220,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch
if err != nil {
goto Cleanup
}
- instance.ClusterName = GetClusterNameFromKeyspaceAndShard(tablet.Keyspace, tablet.Shard)
fullStatus, err = FullStatus(*instanceKey)
if err != nil {
@@ -454,13 +450,13 @@ Cleanup:
// tried to check the instance. last_attempted_check is also
// updated on success by writeInstance.
latency.Start("backend")
- _ = UpdateInstanceLastChecked(&instance.Key, partialSuccess)
+ _ = UpdateInstanceLastChecked(instanceKey, partialSuccess)
latency.Stop("backend")
return nil, err
}
-// GetClusterNameFromKeyspaceAndShard returns the cluster name from keyspace and shard
-func GetClusterNameFromKeyspaceAndShard(keyspace, shard string) string {
+// getKeyspaceShardName returns a single string having both the keyspace and shard
+func getKeyspaceShardName(keyspace, shard string) string {
return fmt.Sprintf("%v:%v", keyspace, shard)
}
@@ -512,7 +508,6 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) {
var primaryOrGroupPrimaryExecutedGtidSet string
primaryOrGroupPrimaryDataFound := false
- // Read the cluster_name of the _primary_ or _group_primary_ of our instance, derive it from there.
query := `
select
replication_depth,
@@ -609,7 +604,6 @@ func readInstanceRow(m sqlutils.RowMap) *Instance {
instance.SecondsBehindPrimary = m.GetNullInt64("replication_lag_seconds")
instance.ReplicationLagSeconds = m.GetNullInt64("replica_lag_seconds")
instance.SQLDelay = m.GetUint("sql_delay")
- instance.ClusterName = m.GetString("cluster_name")
instance.DataCenter = m.GetString("data_center")
instance.Region = m.GetString("region")
instance.PhysicalEnvironment = m.GetString("physical_environment")
@@ -779,9 +773,10 @@ func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryKey *InstanceKe
}
// ReadProblemInstances reads all instances with problems
-func ReadProblemInstances(clusterName string) ([](*Instance), error) {
+func ReadProblemInstances(keyspace string, shard string) ([](*Instance), error) {
condition := `
- cluster_name LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
+ keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
+ and shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END)
and (
(last_seen < last_checked)
or (unix_timestamp() - unix_timestamp(last_checked) > ?)
@@ -794,7 +789,7 @@ func ReadProblemInstances(clusterName string) ([](*Instance), error) {
)
`
- args := sqlutils.Args(clusterName, clusterName, config.Config.InstancePollSeconds*5, config.Config.ReasonableReplicationLagSeconds, config.Config.ReasonableReplicationLagSeconds)
+ args := sqlutils.Args(keyspace, keyspace, shard, shard, config.Config.InstancePollSeconds*5, config.Config.ReasonableReplicationLagSeconds, config.Config.ReasonableReplicationLagSeconds)
instances, err := readInstancesByCondition(condition, args, "")
if err != nil {
return instances, err
@@ -814,15 +809,16 @@ func ReadProblemInstances(clusterName string) ([](*Instance), error) {
// ReadLostInRecoveryInstances returns all instances (potentially filtered by cluster)
// which are currently indicated as downtimed due to being lost during a topology recovery.
-func ReadLostInRecoveryInstances(clusterName string) ([](*Instance), error) {
+func ReadLostInRecoveryInstances(keyspace string, shard string) ([](*Instance), error) {
condition := `
ifnull(
database_instance_downtime.downtime_active = 1
and database_instance_downtime.end_timestamp > now()
and database_instance_downtime.reason = ?, 0)
- and ? IN ('', cluster_name)
+ and ? IN ('', keyspace)
+ and ? IN ('', shard)
`
- return readInstancesByCondition(condition, sqlutils.Args(DowntimeLostInRecoveryMessage, clusterName), "cluster_name asc, replication_depth asc")
+ return readInstancesByCondition(condition, sqlutils.Args(DowntimeLostInRecoveryMessage, keyspace, shard), "keyspace asc, shard asc, replication_depth asc")
}
// readUnseenPrimaryKeys will read list of primaries that have never been seen, and yet whose replicas
@@ -863,46 +859,6 @@ func readUnseenPrimaryKeys() ([]InstanceKey, error) {
return res, nil
}
-// InjectSeed: intented to be used to inject an instance upon startup, assuming it's not already known to vtorc.
-func InjectSeed(instanceKey *InstanceKey) error {
- if instanceKey == nil {
- return fmt.Errorf("InjectSeed: nil instanceKey")
- }
- clusterName := instanceKey.StringCode()
- // minimal details:
- instance := &Instance{Key: *instanceKey, Version: "Unknown", ClusterName: clusterName}
- instance.SetSeed()
- err := WriteInstance(instance, false, nil)
- log.Infof("InjectSeed: %+v, %+v", *instanceKey, err)
- _ = AuditOperation("inject-seed", instanceKey, "injected")
- return err
-}
-
-// InjectUnseenPrimaries will review primaries of instances that are known to be replicating, yet which are not listed
-// in database_instance. Since their replicas are listed as replicating, we can assume that such primaries actually do
-// exist: we shall therefore inject them with minimal details into the database_instance table.
-func InjectUnseenPrimaries() error {
-
- unseenPrimaryKeys, err := readUnseenPrimaryKeys()
- if err != nil {
- return err
- }
-
- operations := 0
- for _, primaryKey := range unseenPrimaryKeys {
- primaryKey := primaryKey
- clusterName := primaryKey.StringCode()
- // minimal details:
- instance := Instance{Key: primaryKey, Version: "Unknown", ClusterName: clusterName}
- if err := WriteInstance(&instance, false, nil); err == nil {
- operations++
- }
- }
-
- _ = AuditOperation("inject-unseen-primaries", nil, fmt.Sprintf("Operations: %d", operations))
- return err
-}
-
// ForgetUnseenInstancesDifferentlyResolved will purge instances which are invalid, and whose hostname
// appears on the hostname_resolved table; this means some time in the past their hostname was unresovled, and now
// resovled to a different value; the old hostname is never accessed anymore and the old entry should be removed.
@@ -999,28 +955,27 @@ func ResolveUnknownPrimaryHostnameResolves() error {
return err
}
-func GetClusterName(instanceKey *InstanceKey) (clusterName string, err error) {
- if clusterName, found := instanceKeyInformativeClusterName.Get(instanceKey.StringCode()); found {
- return clusterName.(string), nil
- }
+// GetKeyspaceShardName gets the keyspace shard name for the given instance key
+func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard string, err error) {
query := `
select
- ifnull(max(cluster_name), '') as cluster_name
+ keyspace,
+ shard
from
- database_instance
+ vitess_tablet
where
hostname = ?
and port = ?
`
err = db.QueryVTOrc(query, sqlutils.Args(instanceKey.Hostname, instanceKey.Port), func(m sqlutils.RowMap) error {
- clusterName = m.GetString("cluster_name")
- instanceKeyInformativeClusterName.Set(instanceKey.StringCode(), clusterName, cache.DefaultExpiration)
+ keyspace = m.GetString("keyspace")
+ shard = m.GetString("shard")
return nil
})
if err != nil {
log.Error(err)
}
- return clusterName, err
+ return keyspace, shard, err
}
// ReadOutdatedInstanceKeys reads and returns keys for all instances that are not up to date (i.e.
@@ -1177,7 +1132,6 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo
"replication_lag_seconds",
"replica_lag_seconds",
"sql_delay",
- "cluster_name",
"data_center",
"region",
"physical_environment",
@@ -1264,7 +1218,6 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo
args = append(args, instance.SecondsBehindPrimary)
args = append(args, instance.ReplicationLagSeconds)
args = append(args, instance.SQLDelay)
- args = append(args, instance.ClusterName)
args = append(args, instance.DataCenter)
args = append(args, instance.Region)
args = append(args, instance.PhysicalEnvironment)
@@ -1454,10 +1407,10 @@ func SnapshotTopologies() error {
_, err := db.ExecVTOrc(`
insert ignore into
database_instance_topology_history (snapshot_unix_timestamp,
- hostname, port, source_host, source_port, cluster_name, version)
+ hostname, port, source_host, source_port, version)
select
UNIX_TIMESTAMP(NOW()),
- hostname, port, source_host, source_port, cluster_name, version
+ hostname, port, source_host, source_port, version
from
database_instance
`,
diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go
index 5d2ba5a73ce..71d0ed94ff9 100644
--- a/go/vt/vtorc/inst/instance_dao_test.go
+++ b/go/vt/vtorc/inst/instance_dao_test.go
@@ -8,6 +8,9 @@ import (
"testing"
"github.com/stretchr/testify/require"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/vtorc/db"
)
var (
@@ -60,17 +63,17 @@ func TestMkInsertOdkuSingle(t *testing.T) {
version, major_version, version_comment, binlog_server, read_only, binlog_format,
binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port,
replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
- source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, cluster_name, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen)
+ source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen)
VALUES
- (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
+ (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
ON DUPLICATE KEY UPDATE
- hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), cluster_name=VALUES(cluster_name), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), 
replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls),
+ hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), 
replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls),
semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status),
instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen)
`
a1 := `i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT,
FULL, false, false, , 0, , 0,
- false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, `
+ false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, `
sql1, args1, err := mkInsertOdkuForInstances(instances[:1], false, true)
require.NoError(t, err)
@@ -83,22 +86,22 @@ func TestMkInsertOdkuThree(t *testing.T) {
// three instances
s3 := `INSERT INTO database_instance
- (hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, cluster_name, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count,
+ (hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count,
semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen)
VALUES
- (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
- (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
- (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
+ (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
+ (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()),
+ (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW())
ON DUPLICATE KEY UPDATE
- hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), cluster_name=VALUES(cluster_name), data_center=VALUES(data_center), region=VALUES(region),
+ hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region),
physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced),
semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status),
instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen)
`
a3 := `
- i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0,
- i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0,
- i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0,
+ i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0,
+ i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0,
+ i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0,
`
sql3, args3, err := mkInsertOdkuForInstances(instances[:3], true, true)
@@ -115,3 +118,40 @@ func fmtArgs(args []any) string {
}
return b.String()
}
+
+func TestGetKeyspaceShardName(t *testing.T) {
+ orcDb, err := db.OpenVTOrc()
+ require.NoError(t, err)
+ defer func() {
+ _, err = orcDb.Exec("delete from vitess_tablet")
+ require.NoError(t, err)
+ }()
+
+ ks := "ks"
+ shard := "0"
+ hostname := "localhost"
+ var port int32 = 100
+ tab100 := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: "zone-1",
+ Uid: 100,
+ },
+ Hostname: hostname,
+ Keyspace: ks,
+ Shard: shard,
+ Type: topodatapb.TabletType_PRIMARY,
+ MysqlHostname: hostname,
+ MysqlPort: port,
+ }
+
+ err = SaveTablet(tab100)
+ require.NoError(t, err)
+
+ keyspaceRead, shardRead, err := GetKeyspaceShardName(&InstanceKey{
+ Hostname: hostname,
+ Port: int(port),
+ })
+ require.NoError(t, err)
+ require.Equal(t, ks, keyspaceRead)
+ require.Equal(t, shard, shardRead)
+}
diff --git a/go/vt/vtorc/inst/keyspace_dao.go b/go/vt/vtorc/inst/keyspace_dao.go
index e02d13fc448..7e55471854d 100644
--- a/go/vt/vtorc/inst/keyspace_dao.go
+++ b/go/vt/vtorc/inst/keyspace_dao.go
@@ -19,8 +19,7 @@ package inst
import (
"errors"
- "github.com/openark/golib/sqlutils"
-
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vtorc/db"
diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go
index 9ccfca7f09d..56ad06ec9e5 100644
--- a/go/vt/vtorc/inst/keyspace_dao_test.go
+++ b/go/vt/vtorc/inst/keyspace_dao_test.go
@@ -19,8 +19,8 @@ package inst
import (
"testing"
- _ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/require"
+ _ "modernc.org/sqlite"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
diff --git a/go/vt/vtorc/inst/minimal_instance.go b/go/vt/vtorc/inst/minimal_instance.go
deleted file mode 100644
index 1eeb85663d3..00000000000
--- a/go/vt/vtorc/inst/minimal_instance.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package inst
-
-type MinimalInstance struct {
- Key InstanceKey
- PrimaryKey InstanceKey
- ClusterName string
-}
-
-func (minimalInstance *MinimalInstance) ToInstance() *Instance {
- return &Instance{
- Key: minimalInstance.Key,
- SourceKey: minimalInstance.PrimaryKey,
- ClusterName: minimalInstance.ClusterName,
- }
-}
diff --git a/go/vt/vtorc/inst/resolve_dao.go b/go/vt/vtorc/inst/resolve_dao.go
index 2af0bf6d32d..d38146469d2 100644
--- a/go/vt/vtorc/inst/resolve_dao.go
+++ b/go/vt/vtorc/inst/resolve_dao.go
@@ -19,10 +19,9 @@ package inst
import (
"github.com/rcrowley/go-metrics"
- "vitess.io/vitess/go/vt/log"
-
- "github.com/openark/golib/sqlutils"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
+ "vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
)
diff --git a/go/vt/vtorc/inst/tablet_dao.go b/go/vt/vtorc/inst/tablet_dao.go
index f9d00ca8ea3..cd762a6883e 100644
--- a/go/vt/vtorc/inst/tablet_dao.go
+++ b/go/vt/vtorc/inst/tablet_dao.go
@@ -20,14 +20,13 @@ import (
"context"
"errors"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
- "github.com/openark/golib/sqlutils"
-
"vitess.io/vitess/go/vt/logutil"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -181,11 +180,12 @@ func SaveTablet(tablet *topodatapb.Tablet) error {
_, err = db.ExecVTOrc(`
replace
into vitess_tablet (
- hostname, port, cell, keyspace, shard, tablet_type, primary_timestamp, info
+ alias, hostname, port, cell, keyspace, shard, tablet_type, primary_timestamp, info
) values (
- ?, ?, ?, ?, ?, ?, ?, ?
+ ?, ?, ?, ?, ?, ?, ?, ?, ?
)
`,
+ topoproto.TabletAliasString(tablet.Alias),
tablet.MysqlHostname,
int(tablet.MysqlPort),
tablet.Alias.Cell,
diff --git a/go/vt/vtorc/inst/tag_dao.go b/go/vt/vtorc/inst/tag_dao.go
index b49601198be..5b5962a9326 100644
--- a/go/vt/vtorc/inst/tag_dao.go
+++ b/go/vt/vtorc/inst/tag_dao.go
@@ -19,10 +19,8 @@ package inst
import (
"fmt"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
-
- "github.com/openark/golib/sqlutils"
-
"vitess.io/vitess/go/vt/vtorc/db"
)
diff --git a/go/vt/vtorc/logic/disable_recovery.go b/go/vt/vtorc/logic/disable_recovery.go
index f5d18381452..4a3766055d2 100644
--- a/go/vt/vtorc/logic/disable_recovery.go
+++ b/go/vt/vtorc/logic/disable_recovery.go
@@ -32,8 +32,7 @@ package logic
import (
"fmt"
- "github.com/openark/golib/sqlutils"
-
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtorc/db"
)
diff --git a/go/vt/vtorc/logic/keyspace_discovery_test.go b/go/vt/vtorc/logic/keyspace_discovery_test.go
index 2abaa29e83d..e5be1fd82f2 100644
--- a/go/vt/vtorc/logic/keyspace_discovery_test.go
+++ b/go/vt/vtorc/logic/keyspace_discovery_test.go
@@ -20,9 +20,9 @@ import (
"context"
"testing"
- _ "github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ _ "modernc.org/sqlite"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
diff --git a/go/vt/vtorc/logic/orchestrator.go b/go/vt/vtorc/logic/orchestrator.go
index 827b40ebd96..dcc30027392 100644
--- a/go/vt/vtorc/logic/orchestrator.go
+++ b/go/vt/vtorc/logic/orchestrator.go
@@ -24,12 +24,12 @@ import (
"syscall"
"time"
- "vitess.io/vitess/go/vt/log"
-
"github.com/patrickmn/go-cache"
"github.com/rcrowley/go-metrics"
"github.com/sjmudd/stopwatch"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtorc/collection"
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/discovery"
@@ -105,49 +105,52 @@ func instancePollSecondsDuration() time.Duration {
return time.Duration(config.Config.InstancePollSeconds) * time.Second
}
-// acceptSignals registers for OS signals
-func acceptSignals() {
+// acceptSighupSignal registers for the SIGHUP signal from the OS to reload the configuration files.
+func acceptSighupSignal() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
- signal.Notify(c, syscall.SIGTERM)
go func() {
- for sig := range c {
- switch sig {
- case syscall.SIGHUP:
- log.Infof("Received SIGHUP. Reloading configuration")
- _ = inst.AuditOperation("reload-configuration", nil, "Triggered via SIGHUP")
- config.Reload()
- discoveryMetrics.SetExpirePeriod(time.Duration(config.DiscoveryCollectionRetentionSeconds) * time.Second)
- case syscall.SIGTERM:
- log.Infof("Received SIGTERM. Starting shutdown")
- atomic.StoreInt32(&hasReceivedSIGTERM, 1)
- discoveryMetrics.StopAutoExpiration()
- // probably should poke other go routines to stop cleanly here ...
- _ = inst.AuditOperation("shutdown", nil, "Triggered via SIGTERM")
- timeout := time.After(shutdownWaitTime)
- func() {
- for {
- count := atomic.LoadInt32(&shardsLockCounter)
- if count == 0 {
- return
- }
- select {
- case <-timeout:
- log.Infof("wait for lock release timed out. Some locks might not have been released.")
- return
- default:
- time.Sleep(100 * time.Millisecond)
- }
- }
- }()
- log.Infof("Shutting down vtorc")
- os.Exit(0)
- }
+ for range c {
+ log.Infof("Received SIGHUP. Reloading configuration")
+ _ = inst.AuditOperation("reload-configuration", nil, "Triggered via SIGHUP")
+ config.Reload()
+ discoveryMetrics.SetExpirePeriod(time.Duration(config.DiscoveryCollectionRetentionSeconds) * time.Second)
}
}()
}
+// closeVTOrc runs all the operations required to cleanly shutdown VTOrc
+func closeVTOrc() {
+ log.Infof("Starting VTOrc shutdown")
+ atomic.StoreInt32(&hasReceivedSIGTERM, 1)
+ discoveryMetrics.StopAutoExpiration()
+	// Poke other goroutines to stop cleanly here ...
+ _ = inst.AuditOperation("shutdown", nil, "Triggered via SIGTERM")
+ // wait for the locks to be released
+ waitForLocksRelease()
+ log.Infof("VTOrc closed")
+}
+
+// waitForLocksRelease is used to wait for release of locks
+func waitForLocksRelease() {
+ timeout := time.After(shutdownWaitTime)
+ for {
+ count := atomic.LoadInt32(&shardsLockCounter)
+ if count == 0 {
+ break
+ }
+ select {
+ case <-timeout:
+ log.Infof("wait for lock release timed out. Some locks might not have been released.")
+ default:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ }
+ break
+ }
+}
+
// handleDiscoveryRequests iterates the discoveryQueue channel and calls upon
// instance discovery per entry.
func handleDiscoveryRequests() {
@@ -231,6 +234,10 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) {
backendLatency := latency.Elapsed("backend")
instanceLatency := latency.Elapsed("instance")
+ if forceDiscovery {
+ log.Infof("Force discovered - %+v, err - %v", instance, err)
+ }
+
if instance == nil {
failedDiscoveriesCounter.Inc(1)
_ = discoveryMetrics.Append(&discovery.Metric{
@@ -252,10 +259,6 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) {
return
}
- if forceDiscovery {
- log.Infof("Force discovered - %+v", instance)
- }
-
_ = discoveryMetrics.Append(&discovery.Metric{
Timestamp: time.Now(),
InstanceKey: instanceKey,
@@ -358,7 +361,9 @@ func ContinuousDiscovery() {
go func() {
_ = ometrics.InitMetrics()
}()
- go acceptSignals()
+ go acceptSighupSignal()
+ // On termination of the server, we should close VTOrc cleanly
+ servenv.OnTermSync(closeVTOrc)
log.Infof("continuous discovery: starting")
for {
@@ -380,7 +385,6 @@ func ContinuousDiscovery() {
// Various periodic internal maintenance tasks
go func() {
if IsLeaderOrActive() {
- go inst.InjectUnseenPrimaries()
go inst.ForgetLongUnseenInstances()
go inst.ForgetUnseenInstancesDifferentlyResolved()
@@ -390,7 +394,6 @@ func ContinuousDiscovery() {
go inst.ExpireMaintenance()
go inst.ExpireCandidateInstances()
go inst.ExpireHostnameUnresolve()
- go inst.ExpireClusterDomainName()
go inst.ExpireAudit()
go inst.FlushNontrivialResolveCacheToDatabase()
go inst.ExpireStaleInstanceBinlogCoordinates()
diff --git a/go/vt/vtorc/logic/orchestrator_test.go b/go/vt/vtorc/logic/orchestrator_test.go
new file mode 100644
index 00000000000..c8f2ac3bfdc
--- /dev/null
+++ b/go/vt/vtorc/logic/orchestrator_test.go
@@ -0,0 +1,56 @@
+package logic
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWaitForLocksRelease(t *testing.T) {
+ oldShutdownWaitTime := shutdownWaitTime
+ // Restore initial values
+ defer func() {
+ shutdownWaitTime = oldShutdownWaitTime
+ }()
+
+ t.Run("No locks to wait for", func(t *testing.T) {
+ // Initially when shardsLockCounter is zero, waitForLocksRelease should run immediately
+ timeSpent := waitForLocksReleaseAndGetTimeWaitedFor()
+ assert.Less(t, timeSpent, 1*time.Second, "waitForLocksRelease should run immediately if there are no locks to wait for")
+ })
+
+ t.Run("Timeout from shutdownWaitTime", func(t *testing.T) {
+ // Increment shardsLockCounter to simulate locking of a shard
+ atomic.AddInt32(&shardsLockCounter, +1)
+ defer func() {
+ // Restore the initial value
+ atomic.StoreInt32(&shardsLockCounter, 0)
+ }()
+ shutdownWaitTime = 200 * time.Millisecond
+ timeSpent := waitForLocksReleaseAndGetTimeWaitedFor()
+ assert.Greater(t, timeSpent, 100*time.Millisecond, "waitForLocksRelease should timeout after 200 milliseconds and not before")
+ assert.Less(t, timeSpent, 300*time.Millisecond, "waitForLocksRelease should timeout after 200 milliseconds and not take any longer")
+ })
+
+ t.Run("Successful wait for locks release", func(t *testing.T) {
+ // Increment shardsLockCounter to simulate locking of a shard
+ atomic.AddInt32(&shardsLockCounter, +1)
+ shutdownWaitTime = 500 * time.Millisecond
+ // Release the locks after 200 milliseconds
+ go func() {
+ time.Sleep(200 * time.Millisecond)
+ atomic.StoreInt32(&shardsLockCounter, 0)
+ }()
+ timeSpent := waitForLocksReleaseAndGetTimeWaitedFor()
+ assert.Greater(t, timeSpent, 100*time.Millisecond, "waitForLocksRelease should wait for the locks and not return early")
+ assert.Less(t, timeSpent, 300*time.Millisecond, "waitForLocksRelease should be successful after 200 milliseconds as all the locks are released")
+ })
+}
+
+func waitForLocksReleaseAndGetTimeWaitedFor() time.Duration {
+ start := time.Now()
+ waitForLocksRelease()
+ return time.Since(start)
+}
diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go
index 85ccddc2210..169057d3c3d 100644
--- a/go/vt/vtorc/logic/tablet_discovery.go
+++ b/go/vt/vtorc/logic/tablet_discovery.go
@@ -26,6 +26,8 @@ import (
"github.com/spf13/pflag"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
+
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
@@ -34,8 +36,6 @@ import (
"vitess.io/vitess/go/vt/vtorc/config"
- "github.com/openark/golib/sqlutils"
-
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topotools"
@@ -72,9 +72,6 @@ func OpenTabletDiscovery() <-chan time.Time {
if _, err := db.ExecVTOrc("delete from vitess_tablet"); err != nil {
log.Error(err)
}
- refreshTabletsUsing(func(instanceKey *inst.InstanceKey) {
- _ = inst.InjectSeed(instanceKey)
- }, false /* forceRefresh */)
return time.Tick(time.Second * time.Duration(config.Config.TopoInformationRefreshSeconds)) //nolint SA1015: using time.Tick leaks the underlying ticker
}
@@ -202,20 +199,21 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an
// Discover new tablets.
// TODO(sougou): enhance this to work with multi-schema,
// where each instanceKey can have multiple tablets.
- latestInstances := make(map[inst.InstanceKey]bool)
+ latestInstances := make(map[string]bool)
+ var wg sync.WaitGroup
for _, tabletInfo := range tablets {
tablet := tabletInfo.Tablet
- if tablet.MysqlHostname == "" {
+ if tablet.Type != topodatapb.TabletType_PRIMARY && !topo.IsReplicaType(tablet.Type) {
continue
}
- if tablet.Type != topodatapb.TabletType_PRIMARY && !topo.IsReplicaType(tablet.Type) {
+ latestInstances[topoproto.TabletAliasString(tablet.Alias)] = true
+ if tablet.MysqlHostname == "" {
continue
}
instanceKey := inst.InstanceKey{
Hostname: tablet.MysqlHostname,
Port: int(tablet.MysqlPort),
}
- latestInstances[instanceKey] = true
old, err := inst.ReadTablet(instanceKey)
if err != nil && err != inst.ErrTabletAliasNil {
log.Error(err)
@@ -228,9 +226,14 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an
log.Error(err)
continue
}
- loader(&instanceKey)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ loader(&instanceKey)
+ }()
log.Infof("Discovered: %v", tablet)
}
+ wg.Wait()
// Forget tablets that were removed.
toForget := make(map[inst.InstanceKey]*topodatapb.Tablet)
@@ -239,12 +242,12 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an
Hostname: row.GetString("hostname"),
Port: row.GetInt("port"),
}
- if !latestInstances[curKey] {
- tablet := &topodatapb.Tablet{}
- if err := prototext.Unmarshal([]byte(row.GetString("info")), tablet); err != nil {
- log.Error(err)
- return nil
- }
+ tablet := &topodatapb.Tablet{}
+ if err := prototext.Unmarshal([]byte(row.GetString("info")), tablet); err != nil {
+ log.Error(err)
+ return nil
+ }
+ if !latestInstances[topoproto.TabletAliasString(tablet.Alias)] {
toForget[curKey] = tablet
}
return nil
@@ -285,34 +288,38 @@ func LockShard(ctx context.Context, instanceKey inst.InstanceKey) (context.Conte
if err != nil {
return nil, nil, err
}
- ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Config.LockShardTimeoutSeconds)*time.Second)
+
atomic.AddInt32(&shardsLockCounter, 1)
ctx, unlock, err := ts.LockShard(ctx, tablet.Keyspace, tablet.Shard, "Orc Recovery")
if err != nil {
- cancel()
atomic.AddInt32(&shardsLockCounter, -1)
return nil, nil, err
}
return ctx, func(e *error) {
defer atomic.AddInt32(&shardsLockCounter, -1)
unlock(e)
- cancel()
}, nil
}
// tabletUndoDemotePrimary calls the said RPC for the given tablet.
func tabletUndoDemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, semiSync bool) error {
- return tmc.UndoDemotePrimary(ctx, tablet, semiSync)
+ tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
+ defer tmcCancel()
+ return tmc.UndoDemotePrimary(tmcCtx, tablet, semiSync)
}
// setReadOnly calls the said RPC for the given tablet
func setReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error {
- return tmc.SetReadOnly(ctx, tablet)
+ tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
+ defer tmcCancel()
+ return tmc.SetReadOnly(tmcCtx, tablet)
}
// setReplicationSource calls the said RPC with the parameters provided
func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, primary *topodatapb.Tablet, semiSync bool) error {
- return tmc.SetReplicationSource(ctx, replica, primary.Alias, 0, "", true, semiSync)
+ tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
+ defer tmcCancel()
+ return tmc.SetReplicationSource(tmcCtx, replica, primary.Alias, 0, "", true, semiSync)
}
// shardPrimary finds the primary of the given keyspace-shard by reading the vtorc backend
diff --git a/go/vt/vtorc/logic/tablet_discovery_test.go b/go/vt/vtorc/logic/tablet_discovery_test.go
index 64262eff250..7790fa997d9 100644
--- a/go/vt/vtorc/logic/tablet_discovery_test.go
+++ b/go/vt/vtorc/logic/tablet_discovery_test.go
@@ -18,16 +18,23 @@ package logic
import (
"context"
+ "fmt"
+ "sync/atomic"
"testing"
+ "time"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
+
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/proto/vttime"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil"
"vitess.io/vitess/go/vt/vtorc/db"
"vitess.io/vitess/go/vt/vtorc/inst"
)
@@ -137,10 +144,33 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) {
verifyRefreshTabletsInKeyspaceShard(t, true, 3, tablets)
})
+ t.Run("tablet shutdown removes mysql hostname and port. We shouldn't forget the tablet", func(t *testing.T) {
+ defer func() {
+ _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error {
+ tablet.MysqlHostname = hostname
+ tablet.MysqlPort = 100
+ return nil
+ })
+ }()
+		// Let's assume tab100 shut down. This would clear its MySQL hostname and port
+ _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error {
+ tablet.MysqlHostname = ""
+ tablet.MysqlPort = 0
+ return nil
+ })
+ require.NoError(t, err)
+ // We expect no tablets to be refreshed. Also, tab100 shouldn't be forgotten
+ verifyRefreshTabletsInKeyspaceShard(t, false, 0, tablets)
+ })
+
t.Run("change a tablet and call refreshTabletsInKeyspaceShard again", func(t *testing.T) {
startTimeInitially := tab100.PrimaryTermStartTime.Seconds
defer func() {
tab100.PrimaryTermStartTime.Seconds = startTimeInitially
+ _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error {
+ tablet.PrimaryTermStartTime.Seconds = startTimeInitially
+ return nil
+ })
}()
tab100.PrimaryTermStartTime.Seconds = 1000
_, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error {
@@ -151,6 +181,26 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) {
// We expect 1 tablet to be refreshed since that is the only one that has changed
verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets)
})
+
+ t.Run("change the port and call refreshTabletsInKeyspaceShard again", func(t *testing.T) {
+ defer func() {
+ _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error {
+ tablet.MysqlPort = 100
+ return nil
+ })
+ tab100.MysqlPort = 100
+ }()
+		// Let's assume tab100 restarted on a different pod. This would change its MySQL port
+ _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error {
+ tablet.MysqlPort = 39293
+ return nil
+ })
+ require.NoError(t, err)
+ tab100.MysqlPort = 39293
+ // We expect 1 tablet to be refreshed since that is the only one that has changed
+ // Also the old tablet should be forgotten
+ verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets)
+ })
}
func TestShardPrimary(t *testing.T) {
@@ -224,17 +274,19 @@ func TestShardPrimary(t *testing.T) {
// verifyRefreshTabletsInKeyspaceShard calls refreshTabletsInKeyspaceShard with the forceRefresh parameter provided and verifies that
// the number of instances refreshed matches the parameter and all the tablets match the ones provided
func verifyRefreshTabletsInKeyspaceShard(t *testing.T, forceRefresh bool, instanceRefreshRequired int, tablets []*topodatapb.Tablet) {
- instancesRefreshed := 0
+ var instancesRefreshed atomic.Int32
+ instancesRefreshed.Store(0)
// call refreshTabletsInKeyspaceShard while counting all the instances that are refreshed
refreshTabletsInKeyspaceShard(context.Background(), keyspace, shard, func(instanceKey *inst.InstanceKey) {
- instancesRefreshed++
+ instancesRefreshed.Add(1)
}, forceRefresh)
// Verify that all the tablets are present in the database
for _, tablet := range tablets {
verifyTabletInfo(t, tablet, "")
}
+ verifyTabletCount(t, len(tablets))
// Verify that refresh as many tablets as expected
- assert.EqualValues(t, instanceRefreshRequired, instancesRefreshed)
+ assert.EqualValues(t, instanceRefreshRequired, instancesRefreshed.Load())
}
// verifyTabletInfo verifies that the tablet information read from the vtorc database
@@ -255,3 +307,143 @@ func verifyTabletInfo(t *testing.T, tabletWanted *topodatapb.Tablet, errString s
assert.Empty(t, diff)
}
}
+
+// verifyTabletCount verifies that the number of tablets in the vitess_tablet table match the given count
+func verifyTabletCount(t *testing.T, countWanted int) {
+ t.Helper()
+ totalTablets := 0
+ err := db.QueryVTOrc("select count(*) as total_tablets from vitess_tablet", nil, func(rowMap sqlutils.RowMap) error {
+ totalTablets = rowMap.GetInt("total_tablets")
+ return nil
+ })
+ require.NoError(t, err)
+ require.Equal(t, countWanted, totalTablets)
+}
+
+func TestSetReadOnly(t *testing.T) {
+ tests := []struct {
+ name string
+ tablet *topodatapb.Tablet
+ tmc *testutil.TabletManagerClient
+ remoteOpTimeout time.Duration
+ errShouldContain string
+ }{
+ {
+ name: "Success",
+ tablet: tab100,
+ tmc: &testutil.TabletManagerClient{
+ SetReadOnlyResults: map[string]error{
+ "zone-1-0000000100": nil,
+ },
+ },
+ }, {
+ name: "Failure",
+ tablet: tab100,
+ tmc: &testutil.TabletManagerClient{
+ SetReadOnlyResults: map[string]error{
+ "zone-1-0000000100": fmt.Errorf("testing error"),
+ },
+ },
+ errShouldContain: "testing error",
+ }, {
+ name: "Timeout",
+ tablet: tab100,
+ remoteOpTimeout: 100 * time.Millisecond,
+ tmc: &testutil.TabletManagerClient{
+ SetReadOnlyResults: map[string]error{
+ "zone-1-0000000100": nil,
+ },
+ SetReadOnlyDelays: map[string]time.Duration{
+ "zone-1-0000000100": 200 * time.Millisecond,
+ },
+ },
+ errShouldContain: "context deadline exceeded",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ oldTmc := tmc
+ oldRemoteOpTimeout := topo.RemoteOperationTimeout
+ defer func() {
+ tmc = oldTmc
+ topo.RemoteOperationTimeout = oldRemoteOpTimeout
+ }()
+
+ tmc = tt.tmc
+ if tt.remoteOpTimeout != 0 {
+ topo.RemoteOperationTimeout = tt.remoteOpTimeout
+ }
+
+ err := setReadOnly(context.Background(), tt.tablet)
+ if tt.errShouldContain == "" {
+ require.NoError(t, err)
+ return
+ }
+ require.ErrorContains(t, err, tt.errShouldContain)
+ })
+ }
+}
+
+func TestTabletUndoDemotePrimary(t *testing.T) {
+ tests := []struct {
+ name string
+ tablet *topodatapb.Tablet
+ tmc *testutil.TabletManagerClient
+ remoteOpTimeout time.Duration
+ errShouldContain string
+ }{
+ {
+ name: "Success",
+ tablet: tab100,
+ tmc: &testutil.TabletManagerClient{
+ UndoDemotePrimaryResults: map[string]error{
+ "zone-1-0000000100": nil,
+ },
+ },
+ }, {
+ name: "Failure",
+ tablet: tab100,
+ tmc: &testutil.TabletManagerClient{
+ UndoDemotePrimaryResults: map[string]error{
+ "zone-1-0000000100": fmt.Errorf("testing error"),
+ },
+ },
+ errShouldContain: "testing error",
+ }, {
+ name: "Timeout",
+ tablet: tab100,
+ remoteOpTimeout: 100 * time.Millisecond,
+ tmc: &testutil.TabletManagerClient{
+ UndoDemotePrimaryResults: map[string]error{
+ "zone-1-0000000100": nil,
+ },
+ UndoDemotePrimaryDelays: map[string]time.Duration{
+ "zone-1-0000000100": 200 * time.Millisecond,
+ },
+ },
+ errShouldContain: "context deadline exceeded",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ oldTmc := tmc
+ oldRemoteOpTimeout := topo.RemoteOperationTimeout
+ defer func() {
+ tmc = oldTmc
+ topo.RemoteOperationTimeout = oldRemoteOpTimeout
+ }()
+
+ tmc = tt.tmc
+ if tt.remoteOpTimeout != 0 {
+ topo.RemoteOperationTimeout = tt.remoteOpTimeout
+ }
+
+ err := tabletUndoDemotePrimary(context.Background(), tt.tablet, false)
+ if tt.errShouldContain == "" {
+ require.NoError(t, err)
+ return
+ }
+ require.ErrorContains(t, err, tt.errShouldContain)
+ })
+ }
+}
diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go
index af12587177c..8a56cc9235b 100644
--- a/go/vt/vtorc/logic/topology_recovery.go
+++ b/go/vt/vtorc/logic/topology_recovery.go
@@ -34,7 +34,6 @@ import (
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vtctl/reparentutil"
- "vitess.io/vitess/go/vt/vtorc/attributes"
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/inst"
"vitess.io/vitess/go/vt/vtorc/util"
@@ -100,7 +99,6 @@ type RecoveryAcknowledgement struct {
Comment string
Key inst.InstanceKey
- ClusterName string
ID int64
UID string
AllRecoveries bool
@@ -109,7 +107,6 @@ type RecoveryAcknowledgement struct {
// BlockedTopologyRecovery represents an entry in the blocked_topology_recovery table
type BlockedTopologyRecovery struct {
FailedInstanceKey inst.InstanceKey
- ClusterName string
Analysis inst.AnalysisCode
LastBlockedTimestamp string
BlockingRecoveryID int64
@@ -258,6 +255,9 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly
// Read the tablet information from the database to find the shard and keyspace of the tablet
tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey)
+ if err != nil {
+ return false, nil, err
+ }
var candidateTabletAlias *topodatapb.TabletAlias
if candidateInstanceKey != nil {
@@ -289,6 +289,8 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly
log.Warningf("ERS - %s", value)
case logutilpb.Level_ERROR:
log.Errorf("ERS - %s", value)
+ default:
+ log.Infof("ERS - %s", value)
}
_ = AuditTopologyRecovery(topologyRecovery, value)
})).ReparentShard(ctx,
@@ -301,6 +303,9 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly
PreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover,
},
)
+ if err != nil {
+ log.Errorf("Error running ERS - %v", err)
+ }
if ev != nil && ev.NewPrimary != nil {
promotedReplica, _, _ = inst.ReadInstance(&inst.InstanceKey{
@@ -322,8 +327,6 @@ func postErsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.Re
if promotedReplica != nil {
// Success!
_ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: successfully promoted %+v", promotedReplica.Key))
-
- _ = attributes.SetGeneralAttribute(analysisEntry.ClusterDetails.ClusterDomain, promotedReplica.Key.StringCode())
}
}
@@ -745,7 +748,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand
// checkIfAlreadyFixed checks whether the problem that the analysis entry represents has already been fixed by another agent or not
func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) {
// Run a replication analysis again. We will check if the problem persisted
- analysisEntries, err := inst.GetReplicationAnalysis(analysisEntry.ClusterDetails.ClusterName, &inst.ReplicationAnalysisHints{})
+ analysisEntries, err := inst.GetReplicationAnalysis(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, &inst.ReplicationAnalysisHints{})
if err != nil {
return false, err
}
@@ -764,7 +767,7 @@ func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) {
// CheckAndRecover is the main entry point for the recovery mechanism
func CheckAndRecover(specificInstance *inst.InstanceKey, candidateInstanceKey *inst.InstanceKey, skipProcesses bool) (recoveryAttempted bool, promotedReplicaKey *inst.InstanceKey, err error) {
// Allow the analysis to run even if we don't want to recover
- replicationAnalysis, err := inst.GetReplicationAnalysis("", &inst.ReplicationAnalysisHints{IncludeDowntimed: true, AuditAnalysis: true})
+ replicationAnalysis, err := inst.GetReplicationAnalysis("", "", &inst.ReplicationAnalysisHints{IncludeDowntimed: true, AuditAnalysis: true})
if err != nil {
log.Error(err)
return false, nil, err
@@ -815,7 +818,6 @@ func postPrsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.Re
if promotedReplica != nil {
// Success!
_ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%+v: successfully promoted %+v", analysisEntry.Analysis, promotedReplica.Key))
- _ = attributes.SetGeneralAttribute(analysisEntry.ClusterDetails.ClusterDomain, promotedReplica.Key.StringCode())
}
}
@@ -826,7 +828,7 @@ func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis
_ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another electNewPrimary.", analysisEntry.AnalyzedInstanceKey))
return false, nil, err
}
- log.Infof("Analysis: %v, will elect a new primary: %v", analysisEntry.Analysis, analysisEntry.ClusterDetails.ClusterName)
+ log.Infof("Analysis: %v, will elect a new primary for %v:%v", analysisEntry.Analysis, analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard)
var promotedReplica *inst.Instance
// This has to be done in the end; whether successful or not, we should mark that the recovery is done.
diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go
index f1cfa284de8..65df9c1ebed 100644
--- a/go/vt/vtorc/logic/topology_recovery_dao.go
+++ b/go/vt/vtorc/logic/topology_recovery_dao.go
@@ -20,10 +20,8 @@ import (
"fmt"
"strings"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
-
- "github.com/openark/golib/sqlutils"
-
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
"vitess.io/vitess/go/vt/vtorc/inst"
@@ -39,7 +37,8 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis
process.ThisHostname,
util.ProcessToken.Hash,
string(analysisEntry.Analysis),
- analysisEntry.ClusterDetails.ClusterName,
+ analysisEntry.ClusterDetails.Keyspace,
+ analysisEntry.ClusterDetails.Shard,
analysisEntry.CountReplicas,
analysisEntry.IsActionableRecovery,
)
@@ -59,7 +58,8 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis
processing_node_hostname,
processcing_node_token,
analysis,
- cluster_name,
+ keyspace,
+ shard,
count_affected_replicas,
is_actionable,
start_active_period
@@ -74,6 +74,7 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis
?,
?,
?,
+ ?,
%s
)
`, startActivePeriodHint)
@@ -125,7 +126,8 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover
processing_node_hostname,
processcing_node_token,
analysis,
- cluster_name,
+ keyspace,
+ shard,
count_affected_replicas,
last_detection_id
) values (
@@ -141,6 +143,7 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover
?,
?,
?,
+ ?,
(select ifnull(max(detection_id), 0) from topology_failure_detection where hostname=? and port=?)
)
`,
@@ -149,7 +152,8 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover
analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port,
process.ThisHostname, util.ProcessToken.Hash,
string(analysisEntry.Analysis),
- analysisEntry.ClusterDetails.ClusterName,
+ analysisEntry.ClusterDetails.Keyspace,
+ analysisEntry.ClusterDetails.Shard,
analysisEntry.CountReplicas,
analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port,
)
@@ -191,14 +195,14 @@ func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis, failIf
if failIfClusterInActiveRecovery {
// Let's check if this cluster has just experienced a failover and is still in active period.
// If so, we reject recovery registration to avoid flapping.
- recoveries, err := ReadInActivePeriodClusterRecovery(analysisEntry.ClusterDetails.ClusterName)
+ recoveries, err := ReadInActivePeriodClusterRecovery(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard)
if err != nil {
log.Error(err)
return nil, err
}
if len(recoveries) > 0 {
_ = RegisterBlockedRecoveries(analysisEntry, recoveries)
- errMsg := fmt.Sprintf("AttemptRecoveryRegistration: cluster %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.ClusterName, recoveries[0].AnalysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey)
+ errMsg := fmt.Sprintf("AttemptRecoveryRegistration: keyspace %+v shard %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey)
log.Errorf(errMsg)
return nil, fmt.Errorf(errMsg)
}
@@ -248,7 +252,8 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking
into blocked_topology_recovery (
hostname,
port,
- cluster_name,
+ keyspace,
+ shard,
analysis,
last_blocked_timestamp,
blocking_recovery_id
@@ -257,17 +262,20 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking
?,
?,
?,
+ ?,
NOW(),
?
)
on duplicate key update
- cluster_name=values(cluster_name),
+ keyspace=values(keyspace),
+ shard=values(shard),
analysis=values(analysis),
last_blocked_timestamp=values(last_blocked_timestamp),
blocking_recovery_id=values(blocking_recovery_id)
`, analysisEntry.AnalyzedInstanceKey.Hostname,
analysisEntry.AnalyzedInstanceKey.Port,
- analysisEntry.ClusterDetails.ClusterName,
+ analysisEntry.ClusterDetails.Keyspace,
+ analysisEntry.ClusterDetails.Shard,
string(analysisEntry.Analysis),
recovery.ID,
)
@@ -446,7 +454,8 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog
ifnull(successor_port, 0) as successor_port,
ifnull(successor_alias, '') as successor_alias,
analysis,
- cluster_name,
+ keyspace,
+ shard,
count_affected_replicas,
participating_instances,
lost_replicas,
@@ -478,7 +487,8 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog
topologyRecovery.AnalysisEntry.AnalyzedInstanceKey.Hostname = m.GetString("hostname")
topologyRecovery.AnalysisEntry.AnalyzedInstanceKey.Port = m.GetInt("port")
topologyRecovery.AnalysisEntry.Analysis = inst.AnalysisCode(m.GetString("analysis"))
- topologyRecovery.AnalysisEntry.ClusterDetails.ClusterName = m.GetString("cluster_name")
+ topologyRecovery.AnalysisEntry.ClusterDetails.Keyspace = m.GetString("keyspace")
+ topologyRecovery.AnalysisEntry.ClusterDetails.Shard = m.GetString("shard")
topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_replicas")
topologyRecovery.SuccessorKey = &inst.InstanceKey{}
@@ -511,12 +521,13 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog
// ReadInActivePeriodClusterRecovery reads recoveries (possibly complete!) that are in active period.
// (may be used to block further recoveries on this cluster)
-func ReadInActivePeriodClusterRecovery(clusterName string) ([]*TopologyRecovery, error) {
+func ReadInActivePeriodClusterRecovery(keyspace string, shard string) ([]*TopologyRecovery, error) {
whereClause := `
where
in_active_period=1
- and cluster_name=?`
- return readRecoveries(whereClause, ``, sqlutils.Args(clusterName))
+ and keyspace=?
+ and shard=?`
+ return readRecoveries(whereClause, ``, sqlutils.Args(keyspace, shard))
}
// ReadInActivePeriodSuccessorInstanceRecovery reads completed recoveries for a given instance, where said instance
@@ -531,17 +542,13 @@ func ReadInActivePeriodSuccessorInstanceRecovery(instanceKey *inst.InstanceKey)
}
// ReadRecentRecoveries reads latest recovery entries from topology_recovery
-func ReadRecentRecoveries(clusterName string, unacknowledgedOnly bool, page int) ([]*TopologyRecovery, error) {
+func ReadRecentRecoveries(unacknowledgedOnly bool, page int) ([]*TopologyRecovery, error) {
whereConditions := []string{}
whereClause := ""
args := sqlutils.Args()
if unacknowledgedOnly {
whereConditions = append(whereConditions, `acknowledged=0`)
}
- if clusterName != "" {
- whereConditions = append(whereConditions, `cluster_name=?`)
- args = append(args, clusterName)
- }
if len(whereConditions) > 0 {
whereClause = fmt.Sprintf("where %s", strings.Join(whereConditions, " and "))
}
diff --git a/go/vt/vtorc/logic/topology_recovery_dao_test.go b/go/vt/vtorc/logic/topology_recovery_dao_test.go
new file mode 100644
index 00000000000..f01e16560a8
--- /dev/null
+++ b/go/vt/vtorc/logic/topology_recovery_dao_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logic
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
+
+ "vitess.io/vitess/go/vt/vtorc/db"
+ "vitess.io/vitess/go/vt/vtorc/inst"
+)
+
+// TestTopologyRecovery tests various operations related to topology recovery like reading from and writing it to the database.
+func TestTopologyRecovery(t *testing.T) {
+ // Open the vtorc database connection.
+ // After the test completes, delete everything from the topology_recovery table.
+ orcDb, err := db.OpenVTOrc()
+ require.NoError(t, err)
+ defer func() {
+ _, err = orcDb.Exec("delete from topology_recovery")
+ require.NoError(t, err)
+ }()
+
+ replicationAnalysis := inst.ReplicationAnalysis{
+ AnalyzedInstanceKey: inst.InstanceKey{
+ Hostname: hostname,
+ Port: 101,
+ },
+ TabletType: tab101.Type,
+ ClusterDetails: inst.ClusterInfo{
+ Keyspace: keyspace,
+ Shard: shard,
+ },
+ AnalyzedKeyspace: keyspace,
+ AnalyzedShard: shard,
+ Analysis: inst.ReplicaIsWritable,
+ IsReadOnly: false,
+ }
+ topologyRecovery := NewTopologyRecovery(replicationAnalysis)
+
+ t.Run("writing to topology recovery", func(t *testing.T) {
+ topologyRecovery, err = writeTopologyRecovery(topologyRecovery)
+ require.NoError(t, err)
+ // The ID field should be populated after the insert
+ require.Greater(t, topologyRecovery.ID, int64(0))
+ })
+
+ t.Run("read recoveries", func(t *testing.T) {
+ recoveries, err := ReadRecentRecoveries(false, 0)
+ require.NoError(t, err)
+ require.Len(t, recoveries, 1)
+ // Assert that the ID field matches the one that we just wrote
+ require.EqualValues(t, topologyRecovery.ID, recoveries[0].ID)
+ })
+}
+
+// TestBlockedRecoveryInsertion tests that we are able to insert into the blocked_topology_recovery table.
+func TestBlockedRecoveryInsertion(t *testing.T) {
+ orcDb, err := db.OpenVTOrc()
+ require.NoError(t, err)
+ defer func() {
+ _, err = orcDb.Exec("delete from blocked_topology_recovery")
+ require.NoError(t, err)
+ }()
+
+ analysisEntry := &inst.ReplicationAnalysis{
+ AnalyzedInstanceKey: inst.InstanceKey{
+ Hostname: "localhost",
+ Port: 100,
+ },
+ ClusterDetails: inst.ClusterInfo{
+ Keyspace: "ks",
+ Shard: "0",
+ },
+ Analysis: inst.DeadPrimaryAndSomeReplicas,
+ }
+ blockedRecovery := &TopologyRecovery{
+ ID: 1,
+ }
+ err = RegisterBlockedRecoveries(analysisEntry, []*TopologyRecovery{blockedRecovery})
+ require.NoError(t, err)
+
+ totalBlockedRecoveries := 0
+ err = db.QueryVTOrc("select count(*) as blocked_recoveries from blocked_topology_recovery", nil, func(rowMap sqlutils.RowMap) error {
+ totalBlockedRecoveries = rowMap.GetInt("blocked_recoveries")
+ return nil
+ })
+ require.NoError(t, err)
+ // There should be 1 blocked recovery after insertion
+ require.Equal(t, 1, totalBlockedRecoveries)
+}
diff --git a/go/vt/vtorc/process/election_dao.go b/go/vt/vtorc/process/election_dao.go
index f4737217d6f..f723bd48dde 100644
--- a/go/vt/vtorc/process/election_dao.go
+++ b/go/vt/vtorc/process/election_dao.go
@@ -17,8 +17,7 @@
package process
import (
- "github.com/openark/golib/sqlutils"
-
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
diff --git a/go/vt/vtorc/process/health_dao.go b/go/vt/vtorc/process/health_dao.go
index ed2b90fcf5c..59ea557223d 100644
--- a/go/vt/vtorc/process/health_dao.go
+++ b/go/vt/vtorc/process/health_dao.go
@@ -19,10 +19,8 @@ package process
import (
"time"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
-
- "github.com/openark/golib/sqlutils"
-
"vitess.io/vitess/go/vt/vtorc/config"
"vitess.io/vitess/go/vt/vtorc/db"
)
diff --git a/go/vt/vtorc/server/api.go b/go/vt/vtorc/server/api.go
index 84b2810ba57..d488633f376 100644
--- a/go/vt/vtorc/server/api.go
+++ b/go/vt/vtorc/server/api.go
@@ -23,7 +23,6 @@ import (
"net/http"
"vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtorc/inst"
"vitess.io/vitess/go/vt/vtorc/logic"
"vitess.io/vitess/go/vt/vtorc/process"
@@ -41,6 +40,8 @@ const (
enableGlobalRecoveriesAPI = "/api/enable-global-recoveries"
replicationAnalysisAPI = "/api/replication-analysis"
healthAPI = "/debug/health"
+
+ shardWithoutKeyspaceFilteringErrorStr = "Filtering by shard without keyspace isn't supported"
)
var (
@@ -57,7 +58,6 @@ var (
// ServeHTTP implements the http.Handler interface. This is the entry point for all the api commands of VTOrc
func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request) {
apiPath := request.URL.Path
- log.Infof("HTTP API Request received: %v", apiPath)
if err := acl.CheckAccessHTTP(request, getACLPermissionLevelForAPI(apiPath)); err != nil {
acl.SendError(response, err)
return
@@ -120,18 +120,13 @@ func returnAsJSON(response http.ResponseWriter, code int, stuff any) {
// problemsAPIHandler is the handler for the problemsAPI endpoint
func problemsAPIHandler(response http.ResponseWriter, request *http.Request) {
// This api also supports filtering by shard and keyspace provided.
- // Currently, both of them have to be provided in order to filter the instances.
- // Once we split the cluster_name field into keyspace and shard, we can support
- // filtering just by keyspace as well.
shard := request.URL.Query().Get("shard")
keyspace := request.URL.Query().Get("keyspace")
- clusterName := ""
- // Override the cluster name to filter by only when both the parameters
- // are specified and not empty
- if keyspace != "" && shard != "" {
- clusterName = inst.GetClusterNameFromKeyspaceAndShard(keyspace, shard)
+ if shard != "" && keyspace == "" {
+ http.Error(response, shardWithoutKeyspaceFilteringErrorStr, http.StatusBadRequest)
+ return
}
- instances, err := inst.ReadProblemInstances(clusterName)
+ instances, err := inst.ReadProblemInstances(keyspace, shard)
if err != nil {
http.Error(response, err.Error(), http.StatusInternalServerError)
return
@@ -162,18 +157,13 @@ func enableGlobalRecoveriesAPIHandler(response http.ResponseWriter) {
// replicationAnalysisAPIHandler is the handler for the replicationAnalysisAPI endpoint
func replicationAnalysisAPIHandler(response http.ResponseWriter, request *http.Request) {
// This api also supports filtering by shard and keyspace provided.
- // Currently, both of them have to be provided in order to filter the replication analysis.
- // Once we split the cluster_name field into keyspace and shard, we can support
- // filtering just by keyspace as well.
shard := request.URL.Query().Get("shard")
keyspace := request.URL.Query().Get("keyspace")
- clusterName := ""
- // Override the cluster name to filter by only when both the parameters
- // are specified and not empty
- if keyspace != "" && shard != "" {
- clusterName = inst.GetClusterNameFromKeyspaceAndShard(keyspace, shard)
+ if shard != "" && keyspace == "" {
+ http.Error(response, shardWithoutKeyspaceFilteringErrorStr, http.StatusBadRequest)
+ return
}
- analysis, err := inst.GetReplicationAnalysis(clusterName, &inst.ReplicationAnalysisHints{})
+ analysis, err := inst.GetReplicationAnalysis(keyspace, shard, &inst.ReplicationAnalysisHints{})
if err != nil {
http.Error(response, err.Error(), http.StatusInternalServerError)
return
diff --git a/go/vt/vtorc/test/db.go b/go/vt/vtorc/test/db.go
index 199dc924619..fab5db4e500 100644
--- a/go/vt/vtorc/test/db.go
+++ b/go/vt/vtorc/test/db.go
@@ -19,10 +19,8 @@ package test
import (
"fmt"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
"vitess.io/vitess/go/vt/log"
-
- "github.com/openark/golib/sqlutils"
-
"vitess.io/vitess/go/vt/vtorc/db"
)
diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go
index 4fa748f91cf..cf030d62ce7 100644
--- a/go/vt/vtorc/test/recovery_analysis.go
+++ b/go/vt/vtorc/test/recovery_analysis.go
@@ -22,7 +22,7 @@ import (
"google.golang.org/protobuf/encoding/prototext"
- "github.com/openark/golib/sqlutils"
+ "vitess.io/vitess/go/vt/external/golib/sqlutils"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
@@ -35,6 +35,7 @@ type InfoForRecoveryAnalysis struct {
Shard string
KeyspaceType int
DurabilityPolicy string
+ IsInvalid int
IsPrimary int
IsCoPrimary int
Hostname string
@@ -47,8 +48,6 @@ type InfoForRecoveryAnalysis struct {
LogFile string
LogPos int64
IsStaleBinlogCoordinates int
- ClusterName string
- ClusterDomain string
GTIDMode string
LastCheckValid int
LastCheckPartialSuccess int
@@ -90,8 +89,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
rowMap := make(sqlutils.RowMap)
rowMap["binary_log_file"] = sqlutils.CellData{String: info.LogFile, Valid: true}
rowMap["binary_log_pos"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LogPos), Valid: true}
- rowMap["cluster_domain"] = sqlutils.CellData{String: info.ClusterDomain, Valid: true}
- rowMap["cluster_name"] = sqlutils.CellData{String: info.ClusterName, Valid: true}
rowMap["count_binlog_server_replicas"] = sqlutils.CellData{Valid: false}
rowMap["count_co_primary_replicas"] = sqlutils.CellData{Valid: false}
rowMap["count_delayed_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountDelayedReplicas), Valid: true}
@@ -122,6 +119,7 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
rowMap["is_co_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsCoPrimary), Valid: true}
rowMap["is_downtimed"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsDowntimed), Valid: true}
rowMap["is_failing_to_connect_to_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsFailingToConnectToPrimary), Valid: true}
+ rowMap["is_invalid"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsInvalid), Valid: true}
rowMap["is_last_check_valid"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LastCheckValid), Valid: true}
rowMap["is_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsPrimary), Valid: true}
rowMap["is_stale_binlog_coordinates"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsStaleBinlogCoordinates), Valid: true}
@@ -163,6 +161,4 @@ func (info *InfoForRecoveryAnalysis) SetValuesFromTabletInfo() {
info.DataCenter = info.TabletInfo.Alias.Cell
info.Keyspace = info.TabletInfo.Keyspace
info.Shard = info.TabletInfo.Shard
- info.ClusterName = fmt.Sprintf("%v:%v", info.TabletInfo.Keyspace, info.TabletInfo.Shard)
- info.ClusterDomain = fmt.Sprintf("%v:%d", info.TabletInfo.MysqlHostname, info.TabletInfo.MysqlPort)
}
diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go
index 78ace7504a2..7998425a38c 100644
--- a/go/vt/vttablet/endtoend/framework/client.go
+++ b/go/vt/vttablet/endtoend/framework/client.go
@@ -19,6 +19,7 @@ package framework
import (
"context"
"errors"
+ "sync"
"time"
"google.golang.org/protobuf/proto"
@@ -40,6 +41,7 @@ type QueryClient struct {
target *querypb.Target
server *tabletserver.TabletServer
transactionID int64
+ reservedIDMu sync.Mutex
reservedID int64
sessionStateChanges string
}
@@ -114,6 +116,8 @@ func (client *QueryClient) Commit() error {
func (client *QueryClient) Rollback() error {
defer func() { client.transactionID = 0 }()
rID, err := client.server.Rollback(client.ctx, client.target, client.transactionID)
+ client.reservedIDMu.Lock()
+ defer client.reservedIDMu.Unlock()
client.reservedID = rID
if err != nil {
return err
@@ -293,6 +297,8 @@ func (client *QueryClient) MessageAck(name string, ids []string) (int64, error)
// ReserveExecute performs a ReserveExecute.
func (client *QueryClient) ReserveExecute(query string, preQueries []string, bindvars map[string]*querypb.BindVariable) (*sqltypes.Result, error) {
+ client.reservedIDMu.Lock()
+ defer client.reservedIDMu.Unlock()
if client.reservedID != 0 {
return nil, errors.New("already reserved a connection")
}
diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go
index ade79b5d896..1f5b719d77c 100644
--- a/go/vt/vttablet/endtoend/framework/server.go
+++ b/go/vt/vttablet/endtoend/framework/server.go
@@ -116,6 +116,8 @@ func StartServer(connParams, connAppDebugParams mysql.ConnParams, dbName string)
config.SignalSchemaChangeReloadIntervalSeconds = tabletenv.Seconds(2.1)
config.SignalWhenSchemaChange = true
config.Healthcheck.IntervalSeconds = 0.1
+ config.Oltp.TxTimeoutSeconds = 5
+ config.Olap.TxTimeoutSeconds = 5
gotBytes, _ := yaml2.Marshal(config)
log.Infof("Config:\n%s", gotBytes)
return StartCustomServer(connParams, connAppDebugParams, dbName, config)
diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go
index 5f55b41beac..3693e7f4325 100644
--- a/go/vt/vttablet/endtoend/misc_test.go
+++ b/go/vt/vttablet/endtoend/misc_test.go
@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"io"
+ "math"
"net/http"
"reflect"
"strings"
@@ -947,3 +948,52 @@ func TestHexAndBitBindVar(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, `[[INT64(10) UINT64(10) INT64(2480) UINT64(2480)]]`, fmt.Sprintf("%v", qr.Rows))
}
+
+// TestShowTablesWithSizes validates the TABLE_TYPE and TABLE_COMMENT values returned by BaseShowTables for tables, views and partitioned tables.
+func TestShowTablesWithSizes(t *testing.T) {
+ ctx := context.Background()
+ conn, err := mysql.Connect(ctx, &connParams)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ setupQueries := []string{
+ `drop view if exists show_tables_with_sizes_v1`,
+ `drop table if exists show_tables_with_sizes_t1`,
+ `drop table if exists show_tables_with_sizes_employees`,
+ `create table show_tables_with_sizes_t1 (id int primary key)`,
+ `create view show_tables_with_sizes_v1 as select * from show_tables_with_sizes_t1`,
+ `CREATE TABLE show_tables_with_sizes_employees (id INT NOT NULL, store_id INT) PARTITION BY HASH(store_id) PARTITIONS 4`,
+ }
+
+ defer func() {
+ _, _ = conn.ExecuteFetch(`drop view if exists show_tables_with_sizes_v1`, 1, false)
+ _, _ = conn.ExecuteFetch(`drop table if exists show_tables_with_sizes_t1`, 1, false)
+ _, _ = conn.ExecuteFetch(`drop table if exists show_tables_with_sizes_employees`, 1, false)
+ }()
+ for _, query := range setupQueries {
+ _, err := conn.ExecuteFetch(query, 1, false)
+ require.NoError(t, err)
+ }
+ expectTables := map[string]([]string){ // TABLE_TYPE, TABLE_COMMENT
+ "show_tables_with_sizes_t1": {"BASE TABLE", ""},
+ "show_tables_with_sizes_v1": {"VIEW", "VIEW"},
+ "show_tables_with_sizes_employees": {"BASE TABLE", ""},
+ }
+
+ rs, err := conn.ExecuteFetch(conn.BaseShowTables(), math.MaxInt, false)
+ require.NoError(t, err)
+ require.NotEmpty(t, rs.Rows)
+
+ assert.GreaterOrEqual(t, len(rs.Rows), len(expectTables))
+ matchedTables := map[string]bool{}
+ for _, row := range rs.Rows {
+ tableName := row[0].ToString()
+ vals, ok := expectTables[tableName]
+ if ok {
+ assert.Equal(t, vals[0], row[1].ToString()) // TABLE_TYPE
+ assert.Equal(t, vals[1], row[3].ToString()) // TABLE_COMMENT
+ matchedTables[tableName] = true
+ }
+ }
+ assert.Equalf(t, len(expectTables), len(matchedTables), "%v", matchedTables)
+}
diff --git a/go/vt/vttablet/endtoend/reserve_test.go b/go/vt/vttablet/endtoend/reserve_test.go
index 095370453cd..355e4d5b953 100644
--- a/go/vt/vttablet/endtoend/reserve_test.go
+++ b/go/vt/vttablet/endtoend/reserve_test.go
@@ -1032,3 +1032,17 @@ func TestFailInfiniteSessions(t *testing.T) {
client.Release())
}
}
+
+func TestReserveQueryTimeout(t *testing.T) {
+ client := framework.NewClient()
+
+ _, err := client.ReserveExecute("select sleep(19)", []string{"set sql_mode = ''"}, nil)
+ assert.NoError(t, err)
+ assert.NoError(t,
+ client.Release())
+
+ _, err = client.ReserveStreamExecute("select sleep(19)", []string{"set sql_mode = ''"}, nil)
+ assert.NoError(t, err)
+ assert.NoError(t,
+ client.Release())
+}
diff --git a/go/vt/vttablet/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go
index 6751e60f9ad..8f6546df5f1 100644
--- a/go/vt/vttablet/endtoend/transaction_test.go
+++ b/go/vt/vttablet/endtoend/transaction_test.go
@@ -321,7 +321,7 @@ func TestShutdownGracePeriod(t *testing.T) {
err := client.Begin(false)
require.NoError(t, err)
go func() {
- _, err = client.Execute("select sleep(10) from dual", nil)
+ _, err := client.Execute("select sleep(10) from dual", nil)
assert.Error(t, err)
}()
@@ -346,7 +346,7 @@ func TestShutdownGracePeriod(t *testing.T) {
err = client.Begin(false)
require.NoError(t, err)
go func() {
- _, err = client.Execute("select sleep(11) from dual", nil)
+ _, err := client.Execute("select sleep(11) from dual", nil)
assert.Error(t, err)
}()
@@ -373,7 +373,7 @@ func TestShutdownGracePeriodWithStreamExecute(t *testing.T) {
err := client.Begin(false)
require.NoError(t, err)
go func() {
- _, err = client.StreamExecute("select sleep(10) from dual", nil)
+ _, err := client.StreamExecute("select sleep(10) from dual", nil)
assert.Error(t, err)
}()
@@ -398,7 +398,7 @@ func TestShutdownGracePeriodWithStreamExecute(t *testing.T) {
err = client.Begin(false)
require.NoError(t, err)
go func() {
- _, err = client.StreamExecute("select sleep(11) from dual", nil)
+ _, err := client.StreamExecute("select sleep(11) from dual", nil)
assert.Error(t, err)
}()
@@ -425,7 +425,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) {
err := client.Begin(false)
require.NoError(t, err)
go func() {
- _, err = client.ReserveExecute("select sleep(10) from dual", nil, nil)
+ _, err := client.ReserveExecute("select sleep(10) from dual", nil, nil)
assert.Error(t, err)
}()
@@ -450,7 +450,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) {
err = client.Begin(false)
require.NoError(t, err)
go func() {
- _, err = client.ReserveExecute("select sleep(11) from dual", nil, nil)
+ _, err := client.ReserveExecute("select sleep(11) from dual", nil, nil)
assert.Error(t, err)
}()
diff --git a/go/vt/vttablet/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go
index ebe0adb22e7..a253f0e804e 100644
--- a/go/vt/vttablet/faketmclient/fake_client.go
+++ b/go/vt/vttablet/faketmclient/fake_client.go
@@ -329,7 +329,7 @@ func (client *FakeTabletManagerClient) Backup(ctx context.Context, tablet *topod
}
// RestoreFromBackup is part of the tmclient.TabletManagerClient interface.
-func (client *FakeTabletManagerClient) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time) (logutil.EventStream, error) {
+func (client *FakeTabletManagerClient) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time, allowedBackupEngines []string) (logutil.EventStream, error) {
return &eofEventStream{}, nil
}
diff --git a/go/vt/vttablet/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go
index 237c16db065..ade05f81b2f 100644
--- a/go/vt/vttablet/grpcqueryservice/server.go
+++ b/go/vt/vttablet/grpcqueryservice/server.go
@@ -272,16 +272,18 @@ func (q *query) BeginStreamExecute(request *querypb.BeginStreamExecuteRequest, s
Result: sqltypes.ResultToProto3(reply),
})
})
- errInLastPacket := stream.Send(&querypb.BeginStreamExecuteResponse{
+
+ if err != nil && state.TransactionID == 0 {
+ return vterrors.ToGRPC(err)
+ }
+
+ err = stream.Send(&querypb.BeginStreamExecuteResponse{
+ Error: vterrors.ToVTRPC(err),
TransactionId: state.TransactionID,
TabletAlias: state.TabletAlias,
SessionStateChanges: state.SessionStateChanges,
})
- if err != nil {
- return vterrors.ToGRPC(err)
- }
-
- return vterrors.ToGRPC(errInLastPacket)
+ return vterrors.ToGRPC(err)
}
// MessageStream is part of the queryservice.QueryServer interface
@@ -399,15 +401,16 @@ func (q *query) ReserveStreamExecute(request *querypb.ReserveStreamExecuteReques
Result: sqltypes.ResultToProto3(reply),
})
})
- errInLastPacket := stream.Send(&querypb.ReserveStreamExecuteResponse{
- ReservedId: state.ReservedID,
- TabletAlias: state.TabletAlias,
- })
- if err != nil {
+ if err != nil && state.ReservedID == 0 {
return vterrors.ToGRPC(err)
}
- return vterrors.ToGRPC(errInLastPacket)
+ err = stream.Send(&querypb.ReserveStreamExecuteResponse{
+ Error: vterrors.ToVTRPC(err),
+ ReservedId: state.ReservedID,
+ TabletAlias: state.TabletAlias,
+ })
+ return vterrors.ToGRPC(err)
}
// ReserveBeginExecute implements the QueryServer interface
@@ -419,8 +422,8 @@ func (q *query) ReserveBeginExecute(ctx context.Context, request *querypb.Reserv
)
state, result, err := q.server.ReserveBeginExecute(ctx, request.Target, request.PreQueries, request.PostBeginQueries, request.Query.Sql, request.Query.BindVariables, request.Options)
if err != nil {
- // if we have a valid reservedID, return the error in-band
- if state.ReservedID != 0 {
+ // if we have a valid reservedID or transactionID, return the error in-band
+ if state.TransactionID != 0 || state.ReservedID != 0 {
return &querypb.ReserveBeginExecuteResponse{
Error: vterrors.ToVTRPC(err),
TransactionId: state.TransactionID,
@@ -452,17 +455,18 @@ func (q *query) ReserveBeginStreamExecute(request *querypb.ReserveBeginStreamExe
Result: sqltypes.ResultToProto3(reply),
})
})
- errInLastPacket := stream.Send(&querypb.ReserveBeginStreamExecuteResponse{
+ if err != nil && state.ReservedID == 0 && state.TransactionID == 0 {
+ return vterrors.ToGRPC(err)
+ }
+
+ err = stream.Send(&querypb.ReserveBeginStreamExecuteResponse{
+ Error: vterrors.ToVTRPC(err),
ReservedId: state.ReservedID,
TransactionId: state.TransactionID,
TabletAlias: state.TabletAlias,
SessionStateChanges: state.SessionStateChanges,
})
- if err != nil {
- return vterrors.ToGRPC(err)
- }
-
- return vterrors.ToGRPC(errInLastPacket)
+ return vterrors.ToGRPC(err)
}
// Release implements the QueryServer interface
diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go
index 6a7ef68ab4f..a10d7d2a584 100644
--- a/go/vt/vttablet/grpctabletconn/conn.go
+++ b/go/vt/vttablet/grpctabletconn/conn.go
@@ -524,6 +524,10 @@ func (conn *gRPCQueryClient) BeginStreamExecute(ctx context.Context, target *que
return state, tabletconn.ErrorFromGRPC(err)
}
+ if ser.Error != nil {
+ return state, tabletconn.ErrorFromVTRPC(ser.Error)
+ }
+
// The last stream receive will not have a result, so callback will not be called for it.
if ser.Result == nil {
return state, nil
@@ -867,6 +871,10 @@ func (conn *gRPCQueryClient) ReserveBeginStreamExecute(ctx context.Context, targ
return state, tabletconn.ErrorFromGRPC(err)
}
+ if ser.Error != nil {
+ return state, tabletconn.ErrorFromVTRPC(ser.Error)
+ }
+
// The last stream receive will not have a result, so callback will not be called for it.
if ser.Result == nil {
return state, nil
@@ -968,6 +976,10 @@ func (conn *gRPCQueryClient) ReserveStreamExecute(ctx context.Context, target *q
return state, tabletconn.ErrorFromGRPC(err)
}
+ if ser.Error != nil {
+ return state, tabletconn.ErrorFromVTRPC(ser.Error)
+ }
+
// The last stream receive will not have a result, so callback will not be called for it.
if ser.Result == nil {
return state, nil
diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go
index 3e3c6ad3ac2..9cd7cc44a67 100644
--- a/go/vt/vttablet/grpctmclient/client.go
+++ b/go/vt/vttablet/grpctmclient/client.go
@@ -962,13 +962,16 @@ func (e *restoreFromBackupStreamAdapter) Recv() (*logutilpb.Event, error) {
}
// RestoreFromBackup is part of the tmclient.TabletManagerClient interface.
-func (client *Client) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time) (logutil.EventStream, error) {
+func (client *Client) RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time, allowedBackupEngines []string) (logutil.EventStream, error) {
c, closer, err := client.dialer.dial(ctx, tablet)
if err != nil {
return nil, err
}
- stream, err := c.RestoreFromBackup(ctx, &tabletmanagerdatapb.RestoreFromBackupRequest{BackupTime: logutil.TimeToProto(backupTime)})
+ stream, err := c.RestoreFromBackup(ctx, &tabletmanagerdatapb.RestoreFromBackupRequest{
+ BackupTime: logutil.TimeToProto(backupTime),
+ AllowedBackupEngines: allowedBackupEngines},
+ )
if err != nil {
closer.Close()
return nil, err
diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go
index 563db4e8a73..6378aa0b336 100644
--- a/go/vt/vttablet/grpctmserver/server.go
+++ b/go/vt/vttablet/grpctmserver/server.go
@@ -258,6 +258,7 @@ func (s *server) ReplicationStatus(ctx context.Context, request *tabletmanagerda
if err == nil {
response.Status = status
}
+
return response, err
}
@@ -512,7 +513,7 @@ func (s *server) RestoreFromBackup(request *tabletmanagerdatapb.RestoreFromBacku
})
})
- return s.tm.RestoreFromBackup(ctx, logger, logutil.ProtoToTime(request.GetBackupTime()))
+ return s.tm.RestoreFromBackup(ctx, logger, logutil.ProtoToTime(request.GetBackupTime()), request.AllowedBackupEngines)
}
// registration glue
diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go
index 58f1132aad0..dfd4525d60a 100644
--- a/go/vt/vttablet/onlineddl/executor.go
+++ b/go/vt/vttablet/onlineddl/executor.go
@@ -78,9 +78,9 @@ var (
)
var vexecUpdateTemplates = []string{
- `update _vt.schema_migrations set migration_status='val' where mysql_schema='val'`,
- `update _vt.schema_migrations set migration_status='val' where migration_uuid='val' and mysql_schema='val'`,
- `update _vt.schema_migrations set migration_status='val' where migration_uuid='val' and mysql_schema='val' and shard='val'`,
+ `update _vt.schema_migrations set migration_status='val1' where mysql_schema='val2'`,
+ `update _vt.schema_migrations set migration_status='val1' where migration_uuid='val2' and mysql_schema='val3'`,
+ `update _vt.schema_migrations set migration_status='val1' where migration_uuid='val2' and mysql_schema='val3' and shard='val4'`,
}
var vexecInsertTemplates = []string{
@@ -98,9 +98,16 @@ var vexecInsertTemplates = []string{
migration_context,
migration_status
) VALUES (
- 'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', 'val', FROM_UNIXTIME(0), 'val', 'val'
+ 'val1', 'val2', 'val3', 'val4', 'val5', 'val6', 'val7', 'val8', 'val9', FROM_UNIXTIME(0), 'vala', 'valb'
)`,
}
+var (
+ // fixCompletedTimestampDone fixes nil `completed_timestamp` columns, see
+ // https://github.com/vitessio/vitess/issues/13927
+ // The fix is in release-18.0
+ // TODO: remove in release-19.0
+ fixCompletedTimestampDone bool
+)
var emptyResult = &sqltypes.Result{}
var acceptableDropTableIfExistsErrorCodes = []int{mysql.ERCantFindFile, mysql.ERNoSuchTable}
@@ -650,6 +657,22 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online
return acceptableErrorCodeFound, nil
}
+// doesConnectionInfoMatch checks if there's a MySQL connection in PROCESSLIST whose Info matches the given text
+func (e *Executor) doesConnectionInfoMatch(ctx context.Context, connID int64, submatch string) (bool, error) {
+ findProcessQuery, err := sqlparser.ParseAndBind(sqlFindProcess,
+ sqltypes.Int64BindVariable(connID),
+ sqltypes.StringBindVariable("%"+submatch+"%"),
+ )
+ if err != nil {
+ return false, err
+ }
+ rs, err := e.execQuery(ctx, findProcessQuery)
+ if err != nil {
+ return false, err
+ }
+ return len(rs.Rows) == 1, nil
+}
+
// validateTableForAlterAction checks whether a table is good to undergo a ALTER operation. It returns detailed error if not.
func (e *Executor) validateTableForAlterAction(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) {
// Validate table does not participate in foreign key relationship:
@@ -718,6 +741,10 @@ func (e *Executor) terminateVReplMigration(ctx context.Context, uuid string) err
// cutOverVReplMigration stops vreplication, then removes the _vt.vreplication entry for the given migration
func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) error {
+ if err := e.incrementCutoverAttempts(ctx, s.workflow); err != nil {
+ return err
+ }
+
tmClient := e.tabletManagerClient()
defer tmClient.Close()
@@ -739,31 +766,101 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er
return err
}
isVreplicationTestSuite := onlineDDL.StrategySetting().IsVreplicationTestSuite()
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "starting cut-over")
+
+ var sentryTableName string
+
+ waitForPos := func(s *VReplStream, pos mysql.Position) error {
+ ctx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold)
+ defer cancel()
+ // Wait for target to reach the up-to-date pos
+ if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, int(s.id), mysql.EncodePosition(pos)); err != nil {
+ return err
+ }
+ // Target is now in sync with source!
+ return nil
+ }
- // A bit early on, we generate names for stowaway and temporary tables
- // We do this here because right now we're in a safe place where nothing happened yet. If there's an error now, bail out
- // and no harm done.
- // Later on, when traffic is blocked and tables renamed, that's a more dangerous place to be in; we want as little logic
- // in that place as possible.
- var stowawayTableName string
if !isVreplicationTestSuite {
- stowawayTableName, err = schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime())
+ // A bit early on, we generate names for stowaway and temporary tables
+ // We do this here because right now we're in a safe place where nothing happened yet. If there's an error now, bail out
+ // and no harm done.
+ // Later on, when traffic is blocked and tables renamed, that's a more dangerous place to be in; we want as little logic
+ // in that place as possible.
+ sentryTableName, err = schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime())
if err != nil {
+ return nil
+ }
+
+ // We create the sentry table before toggling writes, because this involves a WaitForPos, which takes some time. We
+ // don't want to overload the buffering time with this excessive wait.
+
+ if err := e.updateArtifacts(ctx, onlineDDL.UUID, sentryTableName); err != nil {
return err
}
- // Audit stowawayTableName. If operation is complete, we remove the audit. But if this tablet fails while
- // the original table is renamed (into stowaway table), then this will be both the evidence and the information we need
- // to restore the table back into existence. This can (and will) be done by a different vttablet process
- if err := e.updateMigrationStowawayTable(ctx, onlineDDL.UUID, stowawayTableName); err != nil {
+ parsed := sqlparser.BuildParsedQuery(sqlCreateSentryTable, sentryTableName)
+ if _, err := e.execQuery(ctx, parsed.Query); err != nil {
return err
}
- defer e.updateMigrationStowawayTable(ctx, onlineDDL.UUID, "")
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "sentry table created: %s", sentryTableName)
+
+ postSentryPos, err := e.primaryPosition(ctx)
+ if err != nil {
+ return err
+ }
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", mysql.EncodePosition(postSentryPos))
+ if err := waitForPos(s, postSentryPos); err != nil {
+ return err
+ }
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "post-sentry pos reached")
+ }
+
+ lockConn, err := e.pool.Get(ctx, nil)
+ if err != nil {
+ return err
+ }
+ defer lockConn.Recycle()
+ defer lockConn.Exec(ctx, sqlUnlockTables, 1, false)
+
+ renameConn, err := e.pool.Get(ctx, nil)
+ if err != nil {
+ return err
+ }
+ defer renameConn.Recycle()
+ defer renameConn.Kill("premature exit while renaming tables", 0)
+ renameQuery := sqlparser.BuildParsedQuery(sqlSwapTables, onlineDDL.Table, sentryTableName, vreplTable, onlineDDL.Table, sentryTableName, vreplTable)
+
+ waitForRenameProcess := func() error {
+ // This function waits until it finds the RENAME TABLE... query running in MySQL's PROCESSLIST, or until timeout
+ // The function assumes that one of the renamed tables is locked, thus causing the RENAME to block. If nothing
+ // is locked, then the RENAME will be near-instantaneous and it's unlikely that the function will find it.
+ renameWaitCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold)
+ defer cancel()
+
+ for {
+ renameProcessFound, err := e.doesConnectionInfoMatch(renameWaitCtx, renameConn.ID(), "rename")
+ if err != nil {
+ return err
+ }
+ if renameProcessFound {
+ return nil
+ }
+ select {
+ case <-renameWaitCtx.Done():
+ return vterrors.Errorf(vtrpcpb.Code_ABORTED, "timeout for rename query: %s", renameQuery.Query)
+ case <-time.After(time.Second):
+ // sleep
+ }
+ }
}
+ renameCompleteChan := make(chan error)
+
bufferingCtx, bufferingContextCancel := context.WithCancel(ctx)
defer bufferingContextCancel()
// Preparation is complete. We proceed to cut-over.
toggleBuffering := func(bufferQueries bool) error {
+ log.Infof("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)
e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, bufferQueries)
if !bufferQueries {
// called after new table is in place.
@@ -774,27 +871,31 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er
return err
}
}
+ log.Infof("toggled buffering: %t in migration %v", bufferQueries, onlineDDL.UUID)
return nil
}
+
var reenableOnce sync.Once
reenableWritesOnce := func() {
reenableOnce.Do(func() {
+ log.Infof("re-enabling writes in migration %v", onlineDDL.UUID)
toggleBuffering(false)
})
}
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "buffering queries")
// stop writes on source:
err = toggleBuffering(true)
defer reenableWritesOnce()
if err != nil {
return err
}
-
- // swap out the table
// Give a fraction of a second for a scenario where a query is in
// query executor, it passed the ACLs and is _about to_ execute. This will be nicer to those queries:
// they will be able to complete before the rename, rather than block briefly on the rename only to find
// the table no longer exists.
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "graceful wait for buffering")
time.Sleep(100 * time.Millisecond)
+
if isVreplicationTestSuite {
// The testing suite may inject queries internally from the server via a recurring EVENT.
// Those queries are unaffected by query rules (ACLs) because they don't go through Vitess.
@@ -805,30 +906,41 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er
if _, err := e.execQuery(ctx, parsed.Query); err != nil {
return err
}
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "test suite 'before' table renamed")
} else {
// real production
- parsed := sqlparser.BuildParsedQuery(sqlRenameTable, onlineDDL.Table, stowawayTableName)
- if _, err := e.execQuery(ctx, parsed.Query); err != nil {
+
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "locking tables")
+ lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold)
+ defer cancel()
+ lockTableQuery := sqlparser.BuildParsedQuery(sqlLockTwoTablesWrite, sentryTableName, onlineDDL.Table)
+ if _, err := lockConn.Exec(lockCtx, lockTableQuery.Query, 1, false); err != nil {
return err
}
- }
- // We have just created a gaping hole, the original table does not exist.
- // we expect to fill that hole by swapping in the vrepl table. But if anything goes wrong we prepare
- // to rename the table back:
- defer func() {
- if _, err := e.renameTableIfApplicable(ctx, stowawayTableName, onlineDDL.Table); err != nil {
- vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "cannot rename back swapped table: %v into %v: %v", stowawayTableName, onlineDDL.Table, err)
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "renaming tables")
+ go func() {
+ _, err := renameConn.Exec(ctx, renameQuery.Query, 1, false)
+ renameCompleteChan <- err
+ }()
+ // the rename should block, because of the LOCK. Wait for it to show up.
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for RENAME to block")
+ if err := waitForRenameProcess(); err != nil {
+ return err
}
- }()
- // Right now: new queries are buffered, any existing query will have executed, and worst case scenario is
- // that some leftover query finds the table is not actually there anymore...
- // At any case, there's definitely no more writes to the table since it does not exist. We can
- // safely take the (GTID) pos now.
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "RENAME found")
+ }
+
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "reading post-lock pos")
postWritesPos, err := e.primaryPosition(ctx)
if err != nil {
return err
}
+
+ // Right now: new queries are buffered, any existing query will have executed, and worst case scenario is
+ // that some leftover query finds the table is not actually there anymore...
+ // At any case, there's definitely no more writes to the table since it does not exist. We can
+ // safely take the (GTID) pos now.
_ = e.updateMigrationTimestamp(ctx, "liveness_timestamp", s.workflow)
// Writes are now disabled on table. Read up-to-date vreplication info, specifically to get latest (and fixed) pos:
@@ -837,21 +949,13 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er
return err
}
- waitForPos := func() error {
- ctx, cancel := context.WithTimeout(ctx, 2*vreplicationCutOverThreshold)
- defer cancel()
- // Wait for target to reach the up-to-date pos
- if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, int(s.id), mysql.EncodePosition(postWritesPos)); err != nil {
- return err
- }
- // Target is now in sync with source!
- return nil
- }
- log.Infof("VReplication migration %v waiting for position %v", s.workflow, mysql.EncodePosition(postWritesPos))
- if err := waitForPos(); err != nil {
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", mysql.EncodePosition(postWritesPos))
+ if err := waitForPos(s, postWritesPos); err != nil {
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "timeout while waiting for post-lock pos: %v", err)
return err
}
// Stop vreplication
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "stopping vreplication")
if _, err := e.vreplicationExec(ctx, tablet.Tablet, binlogplayer.StopVReplication(uint32(s.id), "stopped for online DDL cutover")); err != nil {
return err
}
@@ -865,23 +969,43 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er
if _, err := e.execQuery(ctx, parsed.Query); err != nil {
return err
}
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "test suite 'after' table renamed")
} else {
- // Normal (non-testing) alter table
- conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB())
- if err != nil {
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "validating rename is still in place")
+ if err := waitForRenameProcess(); err != nil {
return err
}
- defer conn.Close()
- parsed := sqlparser.BuildParsedQuery(sqlRenameTwoTables,
- vreplTable, onlineDDL.Table,
- stowawayTableName, vreplTable,
- )
- if _, err := e.execQuery(ctx, parsed.Query); err != nil {
- return err
+ // Normal (non-testing) alter table
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "dropping sentry table")
+
+ {
+ dropTableQuery := sqlparser.BuildParsedQuery(sqlDropTable, sentryTableName)
+ lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold)
+ defer cancel()
+ if _, err := lockConn.Exec(lockCtx, dropTableQuery.Query, 1, false); err != nil {
+ return err
+ }
+ }
+ {
+ lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold)
+ defer cancel()
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "unlocking tables")
+ if _, err := lockConn.Exec(lockCtx, sqlUnlockTables, 1, false); err != nil {
+ return err
+ }
+ }
+ {
+ lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold)
+ defer cancel()
+ e.updateMigrationStage(lockCtx, onlineDDL.UUID, "waiting for RENAME to complete")
+ if err := <-renameCompleteChan; err != nil {
+ return err
+ }
}
}
}
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "cut-over complete")
e.ownedRunningMigrations.Delete(onlineDDL.UUID)
go func() {
@@ -896,12 +1020,12 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er
}()
// Tables are now swapped! Migration is successful
+ e.updateMigrationStage(ctx, onlineDDL.UUID, "re-enabling writes")
reenableWritesOnce() // this function is also deferred, in case of early return; but now would be a good time to resume writes, before we publish the migration as "complete"
_ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow, s.rowsCopied, emptyHint)
return nil
// deferred function will re-enable writes now
- // deferred function will unlock keyspace
}
// initMigrationSQLMode sets sql_mode according to DDL strategy, and returns a function that
@@ -2013,7 +2137,7 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error {
postponeLaunch := row.AsBool("postpone_launch", false)
postponeCompletion := row.AsBool("postpone_completion", false)
readyToComplete := row.AsBool("ready_to_complete", false)
- ddlAction := row["ddl_action"].ToString()
+ isImmediateOperation := row.AsBool("is_immediate_operation", false)
if postponeLaunch {
// We don't even look into this migration until its postpone_launch flag is cleared
@@ -2021,18 +2145,19 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error {
}
if !readyToComplete {
- // Whether postponsed or not, CREATE and DROP operations are inherently "ready to complete"
- // because their operation is instantaneous.
- switch ddlAction {
- case sqlparser.CreateStr, sqlparser.DropStr:
+ // see if we need to update ready_to_complete
+ if isImmediateOperation {
+ // Whether postponed or not, CREATE and DROP operations, as well as VIEW operations,
+ // are inherently "ready to complete" because their operation is immediate.
if err := e.updateMigrationReadyToComplete(ctx, uuid, true); err != nil {
return err
}
}
}
- if ddlAction == sqlparser.AlterStr || !postponeCompletion {
+
+ if !(isImmediateOperation && postponeCompletion) {
// Any non-postponed migration can be scheduled
- // postponed ALTER can be scheduled
+ // postponed ALTER can be scheduled (because gh-ost or vreplication will postpone the cut-over)
// We only schedule a single migration in the execution of this function
onlyScheduleOneMigration.Do(func() {
err = e.updateMigrationStatus(ctx, uuid, schema.OnlineDDLStatusReady)
@@ -2046,70 +2171,137 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error {
return err
}
-// reviewQueuedMigrations iterates queued migrations and sees if any information needs to be updated
+// reviewEmptyTableRevertMigrations reviews a queued REVERT migration. Such a migration has the following SQL:
+// "REVERT VITESS_MIGRATION '...'"
+// There's nothing in this SQL to indicate:
+// - which table is involved?
+// - is this a table or a view?
+// - Are we reverting a CREATE? A DROP? An ALTER?
+// This function fills in the blanks and updates the database row.
+func (e *Executor) reviewEmptyTableRevertMigrations(ctx context.Context, onlineDDL *schema.OnlineDDL) (changesMade bool, err error) {
+ if onlineDDL.Table != "" {
+ return false, nil
+ }
+ // Table name is empty. Let's populate it.
+
+ // Try to update table name and ddl_action
+ // Failure to do so fails the migration
+ revertUUID, err := onlineDDL.GetRevertUUID()
+ if err != nil {
+ return false, e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot analyze revert UUID for revert migration %s: %v", onlineDDL.UUID, err))
+ }
+ revertedMigration, revertedRow, err := e.readMigration(ctx, revertUUID)
+ if err != nil {
+ return false, e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot read migration %s reverted by migration %s: %s", revertUUID, onlineDDL.UUID, err))
+ }
+ revertedActionStr := revertedRow["ddl_action"].ToString()
+
+ mimickedActionStr := ""
+ switch revertedActionStr {
+ case sqlparser.CreateStr:
+ mimickedActionStr = sqlparser.DropStr
+ case sqlparser.DropStr:
+ mimickedActionStr = sqlparser.CreateStr
+ case sqlparser.AlterStr:
+ mimickedActionStr = sqlparser.AlterStr
+ default:
+ return false, e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot run migration %s reverting %s: unexpected action %s", onlineDDL.UUID, revertedMigration.UUID, revertedActionStr))
+ }
+ if err := e.updateDDLAction(ctx, onlineDDL.UUID, mimickedActionStr); err != nil {
+ return false, err
+ }
+ if err := e.updateMigrationIsView(ctx, onlineDDL.UUID, revertedRow.AsBool("is_view", false)); err != nil {
+ return false, err
+ }
+ if err := e.updateMySQLTable(ctx, onlineDDL.UUID, revertedMigration.Table); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// reviewImmediateOperations reviews a queued migration and determines whether it is an "immediate operation".
+// Immediate operations are ones that can be performed within a split second, or rather, do not require long
+// running processes. Immediate operations are:
+// - CREATE TABLE
+// - DROP TABLE (which we convert into RENAME)
+// - All VIEW operations
+// - An INSTANT DDL accompanied by relevant ddl strategy flags
+// Non-immediate operations are:
+// - A gh-ost migration
+// - A vitess (vreplication) migration
+func (e *Executor) reviewImmediateOperations(ctx context.Context, capableOf mysql.CapableOf, onlineDDL *schema.OnlineDDL, ddlAction string, isView bool) error {
+ isImmediateOperation := false
+ switch ddlAction {
+ case sqlparser.CreateStr, sqlparser.DropStr:
+ isImmediateOperation = true
+ case sqlparser.AlterStr:
+ if isView {
+ isImmediateOperation = true
+ } else {
+ specialPlan, err := e.analyzeSpecialAlterPlan(ctx, onlineDDL, capableOf)
+ if err != nil {
+ return err
+ }
+ if specialPlan != nil {
+ isImmediateOperation = true
+ }
+ }
+ }
+ if isImmediateOperation {
+ if err := e.updateMigrationSetImmediateOperation(ctx, onlineDDL.UUID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// reviewQueuedMigrations iterates through queued migrations and sees if any information needs to be updated.
+// The function analyzes the queued migration and fills in some blanks:
+// - If this is a REVERT migration, what table is affected? What's the operation?
+// - Is this migration an "immediate operation"?
func (e *Executor) reviewQueuedMigrations(ctx context.Context) error {
+ conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB())
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+ _, capableOf, _ := mysql.GetFlavor(conn.ServerVersion, nil)
+
e.migrationMutex.Lock()
defer e.migrationMutex.Unlock()
- // Review REVERT migrations
- // These migrations are submitted with some details missing. This is because the statement
- // REVERT VITESS_MIGRATION ''
- // doesn't have much detail, we need to extract the info from the reverted migration. Missing details:
- // - What table is affected?
- // - What ddl action (CREATE, DROP, ALTER) is being reverted, or what is the counter-operation to be executed?
-
- r, err := e.execQuery(ctx, sqlSelectQueuedRevertMigrations)
+ r, err := e.execQuery(ctx, sqlSelectQueuedUnreviewedMigrations)
if err != nil {
return err
}
- for _, row := range r.Named().Rows {
- uuid := row["migration_uuid"].ToString()
- onlineDDL, _, err := e.readMigration(ctx, uuid)
+ for _, uuidRow := range r.Named().Rows {
+ uuid := uuidRow["migration_uuid"].ToString()
+ onlineDDL, row, err := e.readMigration(ctx, uuid)
if err != nil {
return err
}
- reviewEmptyTableRevertMigrations := func() error {
- if onlineDDL.Table != "" {
- return nil
- }
- // Table name is empty. Let's populate it.
-
- // Try to update table name and ddl_action
- // Failure to do so fails the migration
- revertUUID, err := onlineDDL.GetRevertUUID()
- if err != nil {
- return e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot analyze revert UUID for revert migration %s: %v", onlineDDL.UUID, err))
- }
- revertedMigration, row, err := e.readMigration(ctx, revertUUID)
+ // handle REVERT migrations: populate table name and update ddl action and is_view:
+ ddlAction := row["ddl_action"].ToString()
+ if ddlAction == schema.RevertActionStr {
+ rowModified, err := e.reviewEmptyTableRevertMigrations(ctx, onlineDDL)
if err != nil {
- return e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot read migration %s reverted by migration %s: %s", revertUUID, onlineDDL.UUID, err))
- }
- revertedActionStr := row["ddl_action"].ToString()
- mimickedActionStr := ""
-
- switch revertedActionStr {
- case sqlparser.CreateStr:
- mimickedActionStr = sqlparser.DropStr
- case sqlparser.DropStr:
- mimickedActionStr = sqlparser.CreateStr
- case sqlparser.AlterStr:
- mimickedActionStr = sqlparser.AlterStr
- default:
- return e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot run migration %s reverting %s: unexpected action %s", onlineDDL.UUID, revertedMigration.UUID, revertedActionStr))
- }
- if err := e.updateDDLAction(ctx, onlineDDL.UUID, mimickedActionStr); err != nil {
- return err
- }
- if err := e.updateMigrationIsView(ctx, onlineDDL.UUID, row.AsBool("is_view", false)); err != nil {
return err
}
- if err := e.updateMySQLTable(ctx, onlineDDL.UUID, revertedMigration.Table); err != nil {
- return err
+ if rowModified {
+ // re-read migration and entire row
+ onlineDDL, row, err = e.readMigration(ctx, uuid)
+ if err != nil {
+ return err
+ }
+ ddlAction = row["ddl_action"].ToString()
}
- return nil
}
- if err := reviewEmptyTableRevertMigrations(); err != nil {
+ isView := row.AsBool("is_view", false)
+ if err := e.reviewImmediateOperations(ctx, capableOf, onlineDDL, ddlAction, isView); err != nil {
+ return err
+ }
+ if err := e.updateMigrationTimestamp(ctx, "reviewed_timestamp", uuid); err != nil {
return err
}
}
@@ -2609,7 +2801,7 @@ func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema
Select: viewStmt.Select,
CheckOption: viewStmt.CheckOption,
IsReplace: true,
- Comments: viewStmt.Comments,
+ Comments: sqlparser.CloneRefOfParsedComments(viewStmt.Comments),
}
stmt.SetTable("", artifactViewName)
default:
@@ -2687,6 +2879,7 @@ func (e *Executor) executeSpecialAlterDDLActionMigrationIfApplicable(ctx context
if specialPlan == nil {
return false, nil
}
+
switch specialPlan.operation {
case instantDDLSpecialOperation:
e.addInstantAlgorithm(specialPlan.alterTable)
@@ -3336,6 +3529,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i
if isReady {
if err := e.cutOverVReplMigration(ctx, s); err != nil {
_ = e.updateMigrationMessage(ctx, uuid, err.Error())
+ log.Errorf("cutOverVReplMigration failed: err=%v", err)
if merr, ok := err.(*mysql.SQLError); ok {
switch merr.Num {
case mysql.ERTooLongIdent:
@@ -3567,13 +3761,17 @@ func (e *Executor) gcArtifacts(ctx context.Context) error {
e.migrationMutex.Lock()
defer e.migrationMutex.Unlock()
- if _, err := e.execQuery(ctx, sqlFixCompletedTimestamp); err != nil {
- // This query fixes a bug where stale migrations were marked as 'failed' without updating 'completed_timestamp'
- // see https://github.com/vitessio/vitess/issues/8499
- // Running this query retroactively sets completed_timestamp
- // This 'if' clause can be removed in version v13
- return err
+ // v18 fix. Remove in v19
+ if !fixCompletedTimestampDone {
+ if _, err := e.execQuery(ctx, sqlFixCompletedTimestamp); err != nil {
+ // This query fixes a bug where stale migrations were marked as 'cancelled' or 'failed' without updating 'completed_timestamp'
+ // Running this query retroactively sets completed_timestamp
+ // This fix is created in v18 and can be removed in v19
+ return err
+ }
+ fixCompletedTimestampDone = true
}
+
query, err := sqlparser.ParseAndBind(sqlSelectUncollectedArtifacts,
sqltypes.Int64BindVariable(int64((retainOnlineDDLTables).Seconds())),
)
@@ -3772,6 +3970,31 @@ func (e *Executor) updateMigrationSpecialPlan(ctx context.Context, uuid string,
return err
}
+func (e *Executor) updateMigrationStage(ctx context.Context, uuid string, stage string, args ...interface{}) error {
+ msg := fmt.Sprintf(stage, args...)
+ log.Infof("updateMigrationStage: uuid=%s, stage=%s", uuid, msg)
+ query, err := sqlparser.ParseAndBind(sqlUpdateStage,
+ sqltypes.StringBindVariable(msg),
+ sqltypes.StringBindVariable(uuid),
+ )
+ if err != nil {
+ return err
+ }
+ _, err = e.execQuery(ctx, query)
+ return err
+}
+
+func (e *Executor) incrementCutoverAttempts(ctx context.Context, uuid string) error {
+ query, err := sqlparser.ParseAndBind(sqlIncrementCutoverAttempts,
+ sqltypes.StringBindVariable(uuid),
+ )
+ if err != nil {
+ return err
+ }
+ _, err = e.execQuery(ctx, query)
+ return err
+}
+
// updateMigrationTablet sets 'tablet' column to be this executor's tablet alias for given migration
func (e *Executor) updateMigrationTablet(ctx context.Context, uuid string) error {
query, err := sqlparser.ParseAndBind(sqlUpdateTablet,
@@ -4008,6 +4231,17 @@ func (e *Executor) updateMigrationIsView(ctx context.Context, uuid string, isVie
return err
}
+func (e *Executor) updateMigrationSetImmediateOperation(ctx context.Context, uuid string) error {
+ query, err := sqlparser.ParseAndBind(sqlUpdateMigrationSetImmediateOperation,
+ sqltypes.StringBindVariable(uuid),
+ )
+ if err != nil {
+ return err
+ }
+ _, err = e.execQuery(ctx, query)
+ return err
+}
+
func (e *Executor) updateMigrationReadyToComplete(ctx context.Context, uuid string, isReady bool) error {
query, err := sqlparser.ParseAndBind(sqlUpdateMigrationReadyToComplete,
sqltypes.BoolBindVariable(isReady),
@@ -4293,6 +4527,11 @@ func (e *Executor) SubmitMigration(
revertedUUID, _ := onlineDDL.GetRevertUUID() // Empty value if the migration is not actually a REVERT. Safe to ignore error.
retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds())
+ if retainArtifacts, _ := onlineDDL.StrategySetting().RetainArtifactsDuration(); retainArtifacts != 0 {
+ // Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override!
+ retainArtifactsSeconds = int64((retainArtifacts).Seconds())
+ }
+
_, allowConcurrentMigration := e.allowConcurrentMigration(onlineDDL)
query, err := sqlparser.ParseAndBind(sqlInsertMigration,
sqltypes.StringBindVariable(onlineDDL.UUID),
diff --git a/go/vt/vttablet/onlineddl/executor_test.go b/go/vt/vttablet/onlineddl/executor_test.go
index c41f3b24f3b..975d9d94562 100644
--- a/go/vt/vttablet/onlineddl/executor_test.go
+++ b/go/vt/vttablet/onlineddl/executor_test.go
@@ -31,6 +31,26 @@ import (
"vitess.io/vitess/go/vt/sqlparser"
)
+func TestVexecUpdateTemplates(t *testing.T) {
+ {
+ match, err := sqlparser.QueryMatchesTemplates("select 1 from dual", vexecUpdateTemplates)
+ assert.NoError(t, err)
+ assert.False(t, match)
+ }
+ queries := []string{
+ `update _vt.schema_migrations set migration_status='cancel-all' where mysql_schema='vt_commerce'`,
+ `update _vt.schema_migrations set migration_status = 'cancel-all' where migration_uuid='a5a563da_dc1a_11ec_a416_0a43f95f28a3' and mysql_schema = 'vt_commerce'`,
+ `update _vt.schema_migrations set migration_status = 'cancel-all' where migration_uuid='a5a563da_dc1a_11ec_a416_0a43f95f28a3' and mysql_schema = 'vt_commerce' and shard='0'`,
+ }
+ for _, query := range queries {
+ t.Run(query, func(t *testing.T) {
+ match, err := sqlparser.QueryMatchesTemplates(query, vexecUpdateTemplates)
+ assert.NoError(t, err)
+ assert.True(t, match)
+ })
+ }
+}
+
func TestValidateAndEditCreateTableStatement(t *testing.T) {
e := Executor{}
tt := []struct {
diff --git a/go/vt/vttablet/onlineddl/schema.go b/go/vt/vttablet/onlineddl/schema.go
index 3022b3736bc..276eb19b738 100644
--- a/go/vt/vttablet/onlineddl/schema.go
+++ b/go/vt/vttablet/onlineddl/schema.go
@@ -81,6 +81,10 @@ const (
alterSchemaMigrationsComponentThrottled = "ALTER TABLE _vt.schema_migrations add column component_throttled tinytext NOT NULL"
alterSchemaMigrationsCancelledTimestamp = "ALTER TABLE _vt.schema_migrations add column cancelled_timestamp timestamp NULL DEFAULT NULL"
alterSchemaMigrationsTablePostponeLaunch = "ALTER TABLE _vt.schema_migrations add column postpone_launch tinyint unsigned NOT NULL DEFAULT 0"
+ alterSchemaMigrationsStage = "ALTER TABLE _vt.schema_migrations add column stage text not null"
+ alterSchemaMigrationsCutoverAttempts = "ALTER TABLE _vt.schema_migrations add column cutover_attempts int unsigned NOT NULL DEFAULT 0"
+ alterSchemaMigrationsTableImmediateOperation = "ALTER TABLE _vt.schema_migrations add column is_immediate_operation tinyint unsigned NOT NULL DEFAULT 0"
+ alterSchemaMigrationsReviewedTimestamp = "ALTER TABLE _vt.schema_migrations add column reviewed_timestamp timestamp NULL DEFAULT NULL"
sqlInsertMigration = `INSERT IGNORE INTO _vt.schema_migrations (
migration_uuid,
@@ -109,6 +113,8 @@ const (
sqlSelectQueuedMigrations = `SELECT
migration_uuid,
ddl_action,
+ is_view,
+ is_immediate_operation,
postpone_launch,
postpone_completion,
ready_to_complete
@@ -127,7 +133,8 @@ const (
migration_uuid=%a
`
sqlUpdateMigrationStatusFailedOrCancelled = `UPDATE _vt.schema_migrations
- SET migration_status=IF(cancelled_timestamp IS NULL, 'failed', 'cancelled')
+ SET migration_status=IF(cancelled_timestamp IS NULL, 'failed', 'cancelled'),
+ completed_timestamp=NOW(6)
WHERE
migration_uuid=%a
`
@@ -151,6 +158,11 @@ const (
WHERE
migration_uuid=%a
`
+ sqlUpdateMigrationSetImmediateOperation = `UPDATE _vt.schema_migrations
+ SET is_immediate_operation=1
+ WHERE
+ migration_uuid=%a
+ `
sqlUpdateMigrationReadyToComplete = `UPDATE _vt.schema_migrations
SET ready_to_complete=%a
WHERE
@@ -202,6 +214,16 @@ const (
WHERE
migration_uuid=%a
`
+ sqlUpdateStage = `UPDATE _vt.schema_migrations
+ SET stage=%a
+ WHERE
+ migration_uuid=%a
+ `
+ sqlIncrementCutoverAttempts = `UPDATE _vt.schema_migrations
+ SET cutover_attempts=cutover_attempts+1
+ WHERE
+ migration_uuid=%a
+ `
sqlUpdateReadyForCleanup = `UPDATE _vt.schema_migrations
SET retain_artifacts_seconds=-1
WHERE
@@ -284,6 +306,8 @@ const (
retries=retries + 1,
tablet_failure=0,
message='',
+ stage='',
+ cutover_attempts=0,
ready_timestamp=NULL,
started_timestamp=NULL,
liveness_timestamp=NULL,
@@ -302,6 +326,8 @@ const (
retries=retries + 1,
tablet_failure=0,
message='',
+ stage='',
+ cutover_attempts=0,
ready_timestamp=NULL,
started_timestamp=NULL,
liveness_timestamp=NULL,
@@ -365,12 +391,12 @@ const (
WHERE
migration_status IN ('queued', 'ready', 'running')
`
- sqlSelectQueuedRevertMigrations = `SELECT
+ sqlSelectQueuedUnreviewedMigrations = `SELECT
migration_uuid
FROM _vt.schema_migrations
WHERE
migration_status='queued'
- AND ddl_action='revert'
+ AND reviewed_timestamp IS NULL
`
sqlSelectUncollectedArtifacts = `SELECT
migration_uuid,
@@ -378,7 +404,7 @@ const (
log_path
FROM _vt.schema_migrations
WHERE
- migration_status IN ('complete', 'failed')
+ migration_status IN ('complete', 'cancelled', 'failed')
AND cleanup_timestamp IS NULL
AND completed_timestamp <= IF(retain_artifacts_seconds=0,
NOW() - INTERVAL %a SECOND,
@@ -389,10 +415,16 @@ const (
SET
completed_timestamp=NOW()
WHERE
- migration_status='failed'
+ migration_status IN ('cancelled', 'failed')
AND cleanup_timestamp IS NULL
AND completed_timestamp IS NULL
`
+ sqlFixRequestedTimestamp = `UPDATE _vt.schema_migrations
+ SET
+ requested_timestamp = added_timestamp
+ WHERE
+ requested_timestamp < added_timestamp;
+ `
sqlSelectMigration = `SELECT
id,
migration_uuid,
@@ -581,9 +613,12 @@ const (
_vt.copy_state
WHERE vrepl_id=%a
`
- sqlSwapTables = "RENAME TABLE `%a` TO `%a`, `%a` TO `%a`, `%a` TO `%a`"
- sqlRenameTable = "RENAME TABLE `%a` TO `%a`"
- sqlRenameTwoTables = "RENAME TABLE `%a` TO `%a`, `%a` TO `%a`"
+ sqlSwapTables = "RENAME TABLE `%a` TO `%a`, `%a` TO `%a`, `%a` TO `%a`"
+ sqlRenameTable = "RENAME TABLE `%a` TO `%a`"
+ sqlLockTwoTablesWrite = "LOCK TABLES `%a` WRITE, `%a` WRITE"
+ sqlUnlockTables = "UNLOCK TABLES"
+ sqlCreateSentryTable = "CREATE TABLE IF NOT EXISTS `%a` (id INT PRIMARY KEY)"
+ sqlFindProcess = "SELECT id, Info as info FROM information_schema.processlist WHERE id=%a AND Info LIKE %a"
)
const (
@@ -612,6 +647,13 @@ var (
var ApplyDDL = []string{
sqlCreateSidecarDB,
sqlCreateSchemaMigrationsTable,
+ // Fixing a historical issue: past values of requested_timestamp could be '0000-00-00 00:00:00'.
+ // In turn, those cause `ERROR 1292 (22007): Incorrect datetime value` when attempting to
+ // make any DDL on the table.
+ // We trust added_timestamp to be non-zero (it defaults CURRENT_TIMESTAMP and never modified),
+ // and so we set requested_timestamp to that value.
+ // The query makes a full table scan, because neither column is indexed.
+ sqlFixRequestedTimestamp, // end of fix
alterSchemaMigrationsTableRetries,
alterSchemaMigrationsTableTablet,
alterSchemaMigrationsTableArtifacts,
@@ -648,4 +690,8 @@ var ApplyDDL = []string{
alterSchemaMigrationsComponentThrottled,
alterSchemaMigrationsCancelledTimestamp,
alterSchemaMigrationsTablePostponeLaunch,
+ alterSchemaMigrationsStage,
+ alterSchemaMigrationsCutoverAttempts,
+ alterSchemaMigrationsTableImmediateOperation,
+ alterSchemaMigrationsReviewedTimestamp,
}
diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go
index 85098d9ee0c..05c6b0485df 100644
--- a/go/vt/vttablet/sandboxconn/sandboxconn.go
+++ b/go/vt/vttablet/sandboxconn/sandboxconn.go
@@ -119,6 +119,8 @@ type SandboxConn struct {
EphemeralShardErr error
NotServing bool
+
+ streamHealthResponse *querypb.StreamHealthResponse
}
var _ queryservice.QueryService = (*SandboxConn)(nil) // compile-time interface check
@@ -406,9 +408,22 @@ func (sbc *SandboxConn) MessageAck(ctx context.Context, target *querypb.Target,
// SandboxSQRowCount is the default number of fake splits returned.
var SandboxSQRowCount = int64(10)
-// StreamHealth is not implemented.
+// SetStreamHealthResponse sets the StreamHealthResponse to be returned in StreamHealth.
+func (sbc *SandboxConn) SetStreamHealthResponse(res *querypb.StreamHealthResponse) {
+ sbc.mapMu.Lock()
+ defer sbc.mapMu.Unlock()
+ sbc.streamHealthResponse = res
+}
+
+// StreamHealth always mocks a "healthy" result by default. If you want to override this behavior you
+// can call SetStreamHealthResponse.
func (sbc *SandboxConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error {
- return fmt.Errorf("not implemented in test")
+ sbc.mapMu.Lock()
+ defer sbc.mapMu.Unlock()
+ if sbc.streamHealthResponse != nil {
+ return callback(sbc.streamHealthResponse)
+ }
+ return nil
}
// ExpectVStreamStartPos makes the conn verify that that the next vstream request has the right startPos.
@@ -657,7 +672,8 @@ func getSingleRowResult() *sqltypes.Result {
Rows: SingleRowResult.Rows,
}
- for _, field := range SingleRowResult.Fields {
+ fields := SingleRowResult.Fields
+ for _, field := range fields {
singleRowResult.Fields = append(singleRowResult.Fields, &querypb.Field{
Name: field.Name,
Type: field.Type,
diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go
index b7d3275b13f..751d9ad3d57 100644
--- a/go/vt/vttablet/tabletconntest/fakequeryservice.go
+++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go
@@ -42,10 +42,11 @@ type FakeQueryService struct {
TestingGateway bool
// these fields are used to simulate and synchronize on errors
- HasError bool
- HasBeginError bool
- TabletError error
- ErrorWait chan struct{}
+ HasError bool
+ HasBeginError bool
+ HasReserveError bool
+ TabletError error
+ ErrorWait chan struct{}
// these fields are used to simulate and synchronize on panics
Panics bool
@@ -566,7 +567,20 @@ func (f *FakeQueryService) BeginExecute(ctx context.Context, target *querypb.Tar
// BeginStreamExecute combines Begin and StreamExecute.
func (f *FakeQueryService) BeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.TransactionState, error) {
- panic("FakeQueryService does not implement BeginStreamExecute")
+ state, err := f.Begin(ctx, target, options)
+ if err != nil {
+ return state, err
+ }
+
+ for _, preQuery := range preQueries {
+ _, err := f.Execute(ctx, target, preQuery, nil, state.TransactionID, reservedID, options)
+ if err != nil {
+ return state, err
+ }
+ }
+
+ err = f.StreamExecute(ctx, target, sql, bindVariables, state.TransactionID, reservedID, options, callback)
+ return state, err
}
var (
@@ -710,7 +724,24 @@ func (f *FakeQueryService) ReserveExecute(ctx context.Context, target *querypb.T
// ReserveStreamExecute satisfies the Gateway interface
func (f *FakeQueryService) ReserveStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedState, error) {
- panic("implement me")
+ state, err := f.reserve(transactionID)
+ if err != nil {
+ return state, err
+ }
+ err = f.StreamExecute(ctx, target, sql, bindVariables, transactionID, state.ReservedID, options, callback)
+ return state, err
+}
+
+func (f *FakeQueryService) reserve(transactionID int64) (queryservice.ReservedState, error) {
+ reserveID := transactionID
+ if reserveID == 0 {
+ reserveID = beginTransactionID
+ }
+ if f.HasReserveError {
+ return queryservice.ReservedState{}, f.TabletError
+ }
+ state := queryservice.ReservedState{ReservedID: reserveID, TabletAlias: TestAlias}
+ return state, nil
}
// Release implements the QueryService interface
diff --git a/go/vt/vttablet/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go
index c23b4e61ff4..23d4a3ce2e2 100644
--- a/go/vt/vttablet/tabletconntest/tabletconntest.go
+++ b/go/vt/vttablet/tabletconntest/tabletconntest.go
@@ -584,6 +584,229 @@ func testStreamExecutePanics(t *testing.T, conn queryservice.QueryService, f *Fa
})
}
+func testBeginStreamExecute(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testBeginStreamExecute")
+ ctx := context.Background()
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ i := 0
+ _, err := conn.BeginStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ switch i {
+ case 0:
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ case 1:
+ if len(qr.Fields) == 0 {
+ qr.Fields = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult2) {
+ t.Errorf("Unexpected result2 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult2)
+ }
+ default:
+ t.Fatal("callback should not be called any more")
+ }
+ i++
+ if i >= 2 {
+ return io.EOF
+ }
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func testReserveStreamExecute(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testReserveStreamExecute")
+ ctx := context.Background()
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ i := 0
+ _, err := conn.ReserveStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ switch i {
+ case 0:
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ case 1:
+ if len(qr.Fields) == 0 {
+ qr.Fields = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult2) {
+ t.Errorf("Unexpected result2 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult2)
+ }
+ default:
+ t.Fatal("callback should not be called any more")
+ }
+ i++
+ if i >= 2 {
+ return io.EOF
+ }
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func testBeginStreamExecuteErrorInBegin(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testBeginExecuteErrorInBegin")
+ f.HasBeginError = true
+ testErrorHelper(t, f, "StreamExecute", func(ctx context.Context) error {
+ f.ErrorWait = make(chan struct{})
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ _, err := conn.BeginStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ // For some errors, the call can be retried.
+ select {
+ case <-f.ErrorWait:
+ return nil
+ default:
+ }
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ // signal to the server that the first result has been received
+ close(f.ErrorWait)
+ return nil
+ })
+ return err
+ })
+ f.HasBeginError = false
+}
+
+func testBeginStreamExecuteErrorInExecute(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testBeginStreamExecuteErrorInExecute")
+ f.HasError = true
+ testErrorHelper(t, f, "StreamExecute", func(ctx context.Context) error {
+ f.ErrorWait = make(chan struct{})
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ state, err := conn.BeginStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ // For some errors, the call can be retried.
+ select {
+ case <-f.ErrorWait:
+ return nil
+ default:
+ }
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ // signal to the server that the first result has been received
+ close(f.ErrorWait)
+ return nil
+ })
+ require.NotZero(t, state.TransactionID)
+ return err
+ })
+ f.HasError = false
+}
+
+func testReserveStreamExecuteErrorInReserve(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testReserveExecuteErrorInReserve")
+ f.HasReserveError = true
+ testErrorHelper(t, f, "ReserveStreamExecute", func(ctx context.Context) error {
+ f.ErrorWait = make(chan struct{})
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ _, err := conn.ReserveStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ // For some errors, the call can be retried.
+ select {
+ case <-f.ErrorWait:
+ return nil
+ default:
+ }
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ // signal to the server that the first result has been received
+ close(f.ErrorWait)
+ return nil
+ })
+ return err
+ })
+ f.HasReserveError = false
+}
+
+func testReserveStreamExecuteErrorInExecute(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testReserveStreamExecuteErrorInExecute")
+ f.HasError = true
+ testErrorHelper(t, f, "ReserveStreamExecute", func(ctx context.Context) error {
+ f.ErrorWait = make(chan struct{})
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ state, err := conn.ReserveStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ // For some errors, the call can be retried.
+ select {
+ case <-f.ErrorWait:
+ return nil
+ default:
+ }
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ // signal to the server that the first result has been received
+ close(f.ErrorWait)
+ return nil
+ })
+ require.NotZero(t, state.ReservedID)
+ return err
+ })
+ f.HasError = false
+}
+
+func testBeginStreamExecutePanics(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
+ t.Log("testStreamExecutePanics")
+ // early panic is before sending the Fields, that is returned
+ // by the StreamExecute call itself, or as the first error
+ // by ErrFunc
+ f.StreamExecutePanicsEarly = true
+ testPanicHelper(t, f, "StreamExecute.Early", func(ctx context.Context) error {
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ return conn.StreamExecute(ctx, TestTarget, StreamExecuteQuery, StreamExecuteBindVars, 0, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ return nil
+ })
+ })
+
+ // late panic is after sending Fields
+ f.StreamExecutePanicsEarly = false
+ testPanicHelper(t, f, "StreamExecute.Late", func(ctx context.Context) error {
+ f.PanicWait = make(chan struct{})
+ ctx = callerid.NewContext(ctx, TestCallerID, TestVTGateCallerID)
+ _, err := conn.BeginStreamExecute(ctx, TestTarget, nil, StreamExecuteQuery, StreamExecuteBindVars, 0, TestExecuteOptions, func(qr *sqltypes.Result) error {
+ // For some errors, the call can be retried.
+ select {
+ case <-f.PanicWait:
+ return nil
+ default:
+ }
+ if len(qr.Rows) == 0 {
+ qr.Rows = nil
+ }
+ if !qr.Equal(&StreamExecuteQueryResult1) {
+ t.Errorf("Unexpected result1 from StreamExecute: got %v wanted %v", qr, StreamExecuteQueryResult1)
+ }
+ // signal to the server that the first result has been received
+ close(f.PanicWait)
+ return nil
+ })
+ return err
+ })
+}
+
func testMessageStream(t *testing.T, conn queryservice.QueryService, f *FakeQueryService) {
t.Log("testMessageStream")
ctx := context.Background()
@@ -716,8 +939,10 @@ func TestSuite(t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *F
testExecute,
testBeginExecute,
testStreamExecute,
+ testBeginStreamExecute,
testMessageStream,
testMessageAck,
+ testReserveStreamExecute,
// error test cases
testBeginError,
@@ -735,6 +960,10 @@ func TestSuite(t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *F
testBeginExecuteErrorInBegin,
testBeginExecuteErrorInExecute,
testStreamExecuteError,
+ testBeginStreamExecuteErrorInBegin,
+ testBeginStreamExecuteErrorInExecute,
+ testReserveStreamExecuteErrorInReserve,
+ testReserveStreamExecuteErrorInExecute,
testMessageStreamError,
testMessageAckError,
@@ -753,6 +982,7 @@ func TestSuite(t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *F
testExecutePanics,
testBeginExecutePanics,
testStreamExecutePanics,
+ testBeginStreamExecutePanics,
testMessageStreamPanics,
testMessageAckPanics,
}
diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go
index dbcc158097f..57fe13ac260 100644
--- a/go/vt/vttablet/tabletmanager/restore.go
+++ b/go/vt/vttablet/tabletmanager/restore.go
@@ -47,14 +47,16 @@ import (
// It is only enabled if restore_from_backup is set.
var (
- restoreFromBackup bool
- restoreFromBackupTsStr string
- restoreConcurrency = 4
- waitForBackupInterval time.Duration
+ restoreFromBackup bool
+ restoreFromBackupAllowedEngines []string
+ restoreFromBackupTsStr string
+ restoreConcurrency = 4
+ waitForBackupInterval time.Duration
)
func registerRestoreFlags(fs *pflag.FlagSet) {
fs.BoolVar(&restoreFromBackup, "restore_from_backup", restoreFromBackup, "(init restore parameter) will check BackupStorage for a recent backup at startup and start there")
+ fs.StringSliceVar(&restoreFromBackupAllowedEngines, "restore-from-backup-allowed-engines", restoreFromBackupAllowedEngines, "(init restore parameter) if set, only backups taken with the specified engines are eligible to be restored")
fs.StringVar(&restoreFromBackupTsStr, "restore_from_backup_ts", restoreFromBackupTsStr, "(init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'")
fs.IntVar(&restoreConcurrency, "restore_concurrency", restoreConcurrency, "(init restore parameter) how many concurrent files to restore at once")
fs.DurationVar(&waitForBackupInterval, "wait_for_backup_interval", waitForBackupInterval, "(init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear")
@@ -97,7 +99,14 @@ func init() {
// It will either work, fail gracefully, or return
// an error in case of a non-recoverable error.
// It takes the action lock so no RPC interferes.
-func (tm *TabletManager) RestoreData(ctx context.Context, logger logutil.Logger, waitForBackupInterval time.Duration, deleteBeforeRestore bool, backupTime time.Time) error {
+func (tm *TabletManager) RestoreData(
+ ctx context.Context,
+ logger logutil.Logger,
+ waitForBackupInterval time.Duration,
+ deleteBeforeRestore bool,
+ backupTime time.Time,
+ allowedBackupEngines []string,
+) error {
if err := tm.lock(ctx); err != nil {
return err
}
@@ -151,7 +160,7 @@ func (tm *TabletManager) RestoreData(ctx context.Context, logger logutil.Logger,
startTime = time.Now()
- err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, backupTime)
+ err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, backupTime, allowedBackupEngines)
if err != nil {
return err
}
@@ -169,7 +178,12 @@ func (tm *TabletManager) RestoreData(ctx context.Context, logger logutil.Logger,
return nil
}
-func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.Logger, waitForBackupInterval time.Duration, deleteBeforeRestore bool, backupTime time.Time) error {
+func (tm *TabletManager) restoreDataLocked(ctx context.Context,
+ logger logutil.Logger,
+ waitForBackupInterval time.Duration,
+ deleteBeforeRestore bool,
+ backupTime time.Time,
+ allowedBackupEngines []string) error {
tablet := tm.Tablet()
originalType := tablet.Type
@@ -195,18 +209,23 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L
log.Infof("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, backupTime)
}
+ if backupTime.IsZero() {
+ backupTime = logutil.ProtoToTime(keyspaceInfo.SnapshotTime)
+ }
+
params := mysqlctl.RestoreParams{
- Cnf: tm.Cnf,
- Mysqld: tm.MysqlDaemon,
- Logger: logger,
- Concurrency: restoreConcurrency,
- HookExtraEnv: tm.hookExtraEnv(),
- LocalMetadata: localMetadata,
- DeleteBeforeRestore: deleteBeforeRestore,
- DbName: topoproto.TabletDbName(tablet),
- Keyspace: keyspace,
- Shard: tablet.Shard,
- StartTime: backupTime,
+ Cnf: tm.Cnf,
+ Mysqld: tm.MysqlDaemon,
+ Logger: logger,
+ Concurrency: restoreConcurrency,
+ HookExtraEnv: tm.hookExtraEnv(),
+ LocalMetadata: localMetadata,
+ DeleteBeforeRestore: deleteBeforeRestore,
+ DbName: topoproto.TabletDbName(tablet),
+ Keyspace: keyspace,
+ Shard: tablet.Shard,
+ StartTime: backupTime,
+ AllowedBackupEngines: allowedBackupEngines,
}
// Check whether we're going to restore before changing to RESTORE type,
diff --git a/go/vt/vttablet/tabletmanager/rpc_actions.go b/go/vt/vttablet/tabletmanager/rpc_actions.go
index 1093c331a1a..d51f5d2f84a 100644
--- a/go/vt/vttablet/tabletmanager/rpc_actions.go
+++ b/go/vt/vttablet/tabletmanager/rpc_actions.go
@@ -30,6 +30,7 @@ import (
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
// DBAction is used to tell ChangeTabletType whether to call SetReadOnly on change to
@@ -82,7 +83,13 @@ func (tm *TabletManager) ChangeType(ctx context.Context, tabletType topodatapb.T
return err
}
defer tm.unlock()
- return tm.changeTypeLocked(ctx, tabletType, DBActionNone, convertBoolToSemiSyncAction(semiSync))
+
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return err
+ }
+
+ return tm.changeTypeLocked(ctx, tabletType, DBActionNone, semiSyncAction)
}
// ChangeType changes the tablet type
@@ -142,9 +149,23 @@ func (tm *TabletManager) RunHealthCheck(ctx context.Context) {
tm.QueryServiceControl.BroadcastHealth()
}
-func convertBoolToSemiSyncAction(semiSync bool) SemiSyncAction {
- if semiSync {
- return SemiSyncActionSet
+func (tm *TabletManager) convertBoolToSemiSyncAction(semiSync bool) (SemiSyncAction, error) {
+ semiSyncExtensionLoaded, err := tm.MysqlDaemon.SemiSyncExtensionLoaded()
+ if err != nil {
+ return SemiSyncActionNone, err
+ }
+
+ if semiSyncExtensionLoaded {
+ if semiSync {
+ return SemiSyncActionSet, nil
+ } else {
+ return SemiSyncActionUnset, nil
+ }
+ } else {
+ if semiSync {
+ return SemiSyncActionNone, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded")
+ } else {
+ return SemiSyncActionNone, nil
+ }
}
- return SemiSyncActionUnset
}
diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go
index 2ba894f288f..13ab38dc241 100644
--- a/go/vt/vttablet/tabletmanager/rpc_agent.go
+++ b/go/vt/vttablet/tabletmanager/rpc_agent.go
@@ -136,7 +136,9 @@ type RPCTM interface {
Backup(ctx context.Context, logger logutil.Logger, request *tabletmanagerdatapb.BackupRequest) error
- RestoreFromBackup(ctx context.Context, logger logutil.Logger, backupTime time.Time) error
+ RestoreFromBackup(ctx context.Context, logger logutil.Logger, backupTime time.Time, allowedBackupEngines []string) error
+
+ IsBackupRunning() bool
// HandleRPCPanic is to be called in a defer statement in each
// RPC input point.
diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go
index 0fa0744a1c5..36c9fc4a303 100644
--- a/go/vt/vttablet/tabletmanager/rpc_backup.go
+++ b/go/vt/vttablet/tabletmanager/rpc_backup.go
@@ -50,7 +50,13 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req
if !req.AllowPrimary && currentTablet.Type == topodatapb.TabletType_PRIMARY {
return fmt.Errorf("type PRIMARY cannot take backup. if you really need to do this, rerun the backup command with --allow_primary")
}
- engine, err := mysqlctl.GetBackupEngine()
+
+ backupEngine := ""
+ if req.BackupEngine != nil {
+ backupEngine = *req.BackupEngine
+ }
+
+ engine, err := mysqlctl.GetBackupEngine(backupEngine)
if err != nil {
return vterrors.Wrap(err, "failed to find backup engine")
}
@@ -115,6 +121,7 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req
Shard: tablet.Shard,
TabletAlias: topoproto.TabletAliasString(tablet.Alias),
BackupTime: time.Now(),
+ BackupEngine: backupEngine,
}
returnErr := mysqlctl.Backup(ctx, backupParams)
@@ -153,7 +160,7 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req
// RestoreFromBackup deletes all local data and then restores the data from the latest backup [at
// or before the backupTime value if specified]
-func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.Logger, backupTime time.Time) error {
+func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.Logger, backupTime time.Time, allowedBackupEngines []string) error {
if err := tm.lock(ctx); err != nil {
return err
}
@@ -171,7 +178,7 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L
l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger)
// now we can run restore
- err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, backupTime)
+ err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, backupTime, allowedBackupEngines)
// re-run health check to be sure to capture any replication delay
tm.QueryServiceControl.BroadcastHealth()
@@ -179,6 +186,10 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L
return err
}
+func (tm *TabletManager) IsBackupRunning() bool {
+ return tm._isBackupRunning
+}
+
func (tm *TabletManager) beginBackup(backupMode string) error {
tm.mutex.Lock()
defer tm.mutex.Unlock()
diff --git a/go/vt/vttablet/tabletmanager/rpc_query_test.go b/go/vt/vttablet/tabletmanager/rpc_query_test.go
index f6167e24917..87a64b2d8b7 100644
--- a/go/vt/vttablet/tabletmanager/rpc_query_test.go
+++ b/go/vt/vttablet/tabletmanager/rpc_query_test.go
@@ -27,7 +27,7 @@ import (
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/dbconfigs"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/vttablet/tabletservermock"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
@@ -38,7 +38,7 @@ func TestTabletManager_ExecuteFetchAsDba(t *testing.T) {
cp := mysql.ConnParams{}
db := fakesqldb.New(t)
db.AddQueryPattern(".*", &sqltypes.Result{})
- daemon := fakemysqldaemon.NewFakeMysqlDaemon(db)
+ daemon := mysqlctl.NewFakeMysqlDaemon(db)
dbName := " escap`e me "
tm := &TabletManager{
diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go
index 49dd010f289..2790e37a474 100644
--- a/go/vt/vttablet/tabletmanager/rpc_replication.go
+++ b/go/vt/vttablet/tabletmanager/rpc_replication.go
@@ -31,6 +31,7 @@ import (
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vtctl/reparentutil"
"vitess.io/vitess/go/vt/vterrors"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
@@ -43,7 +44,7 @@ var disableReplicationManager bool
func registerReplicationFlags(fs *pflag.FlagSet) {
fs.Bool("enable_semi_sync", false, "")
- fs.MarkDeprecated("enable_semi_sync", "--enable_semi_sync is deprecated; please set the correct durability policy on the keyspace instead.")
+ fs.MarkDeprecated("enable_semi_sync", "please set the correct durability policy on the keyspace instead.")
fs.BoolVar(&setSuperReadOnly, "use_super_read_only", setSuperReadOnly, "Set super_read_only flag when performing planned failover.")
fs.BoolVar(&disableReplicationManager, "disable-replication-manager", disableReplicationManager, "Disable replication manager to prevent replication repairs.")
@@ -60,7 +61,11 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat
if err != nil {
return nil, err
}
- return mysql.ReplicationStatusToProto(status), nil
+
+ protoStatus := mysql.ReplicationStatusToProto(status)
+ protoStatus.BackupRunning = tm.IsBackupRunning()
+
+ return protoStatus, nil
}
// FullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
@@ -294,7 +299,12 @@ func (tm *TabletManager) StartReplication(ctx context.Context, semiSync bool) er
}
}()
- if err := tm.fixSemiSync(tm.Tablet().Type, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return err
+ }
+
+ if err := tm.fixSemiSync(tm.Tablet().Type, semiSyncAction); err != nil {
return err
}
return tm.MysqlDaemon.StartReplication(tm.hookExtraEnv())
@@ -351,7 +361,7 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string
if setSuperReadOnly {
// Setting super_read_only off so that we can run the DDL commands
- if err := tm.MysqlDaemon.SetSuperReadOnly(false); err != nil {
+ if _, err := tm.MysqlDaemon.SetSuperReadOnly(ctx, false); err != nil {
if strings.Contains(err.Error(), strconv.Itoa(mysql.ERUnknownSystemVariable)) {
log.Warningf("server does not know about super_read_only, continuing anyway...")
} else {
@@ -377,16 +387,21 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string
return "", err
}
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return "", err
+ }
+
// Set the server read-write, from now on we can accept real
// client writes. Note that if semi-sync replication is enabled,
// we'll still need some replicas to be able to commit transactions.
- if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_PRIMARY, DBActionSetReadWrite, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_PRIMARY, DBActionSetReadWrite, semiSyncAction); err != nil {
return "", err
}
// Enforce semi-sync after changing the tablet)type to PRIMARY. Otherwise, the
// primary will hang while trying to create the database.
- if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil {
return "", err
}
@@ -423,11 +438,16 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab
}
defer tm.unlock()
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return err
+ }
+
// If we were a primary type, switch our type to replica. This
// is used on the old primary when using InitShardPrimary with
// -force, and the new primary is different from the old primary.
if tm.Tablet().Type == topodatapb.TabletType_PRIMARY {
- if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_REPLICA, DBActionNone, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_REPLICA, DBActionNone, semiSyncAction); err != nil {
return err
}
}
@@ -450,7 +470,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab
if tt == topodatapb.TabletType_PRIMARY {
tt = topodatapb.TabletType_REPLICA
}
- if err := tm.fixSemiSync(tt, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ if err := tm.fixSemiSync(tt, semiSyncAction); err != nil {
return err
}
@@ -545,7 +565,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure
// idempotent.
if setSuperReadOnly {
// Setting super_read_only also sets read_only
- if err := tm.MysqlDaemon.SetSuperReadOnly(true); err != nil {
+ if _, err := tm.MysqlDaemon.SetSuperReadOnly(ctx, true); err != nil {
if strings.Contains(err.Error(), strconv.Itoa(mysql.ERUnknownSystemVariable)) {
log.Warningf("server does not know about super_read_only, continuing anyway...")
} else {
@@ -601,8 +621,13 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e
}
defer tm.unlock()
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return err
+ }
+
// If using semi-sync, we need to enable source-side.
- if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil {
return err
}
@@ -670,9 +695,14 @@ func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias *
}
defer tm.unlock()
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return err
+ }
+
// setReplicationSourceLocked also fixes the semi-sync. In case the tablet type is primary it assumes that it will become a replica if SetReplicationSource
// is called, so we always call fixSemiSync with a non-primary tablet type. This will always set the source side replication to false.
- return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, convertBoolToSemiSyncAction(semiSync))
+ return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, semiSyncAction)
}
func (tm *TabletManager) setReplicationSourceRepairReplication(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) (err error) {
@@ -681,6 +711,23 @@ func (tm *TabletManager) setReplicationSourceRepairReplication(ctx context.Conte
return err
}
+ durabilityName, err := tm.TopoServer.GetKeyspaceDurability(ctx, tm.Tablet().Keyspace)
+ if err != nil {
+ return vterrors.Wrapf(err, "cannot read keyspace durability policy %v", tm.Tablet().Keyspace)
+ }
+ log.Infof("Getting a new durability policy for %v", durabilityName)
+ durability, err := reparentutil.GetDurabilityPolicy(durabilityName)
+ if err != nil {
+ return vterrors.Wrapf(err, "cannot get durability policy %v", durabilityName)
+ }
+
+ // If using semi-sync, we need to enable it before connecting to primary.
+ // We should set the correct type, since it is used in replica semi-sync.
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(reparentutil.IsReplicaSemiSync(durability, parent.Tablet, tm.Tablet()))
+ if err != nil {
+ return err
+ }
+
ctx, unlock, lockErr := tm.TopoServer.LockShard(ctx, parent.Tablet.GetKeyspace(), parent.Tablet.GetShard(), fmt.Sprintf("repairReplication to %v as parent)", topoproto.TabletAliasString(parentAlias)))
if lockErr != nil {
return lockErr
@@ -688,7 +735,7 @@ func (tm *TabletManager) setReplicationSourceRepairReplication(ctx context.Conte
defer unlock(&err)
- return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, SemiSyncActionNone)
+ return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, semiSyncAction)
}
func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error {
@@ -702,6 +749,12 @@ func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Contex
}
func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync SemiSyncAction) (err error) {
+ tm._isSetReplicationSourceLockedRunning = true
+
+ defer func() {
+ tm._isSetReplicationSourceLockedRunning = false
+ }()
+
// End orchestrator maintenance at the end of fixing replication.
// This is a best effort operation, so it should happen in a goroutine
defer func() {
@@ -774,14 +827,12 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA
}
host := parent.Tablet.MysqlHostname
port := int(parent.Tablet.MysqlPort)
- // We want to reset the replication parameters and set replication source again when forceStartReplication is provided
- // because sometimes MySQL gets stuck due to improper initialization of master info structure or related failures and throws errors like
- // ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log
- // These errors can only be resolved by resetting the replication parameters, otherwise START SLAVE fails. So when this RPC
- // gets called from VTOrc or replication manager to fix the replication in these cases with forceStartReplication, we should also
- // reset the replication parameters and set the source port information again.
- if status.SourceHost != host || status.SourcePort != port || forceStartReplication {
- // This handles reseting the replication parameters, changing the address and then starting the replication.
+ // If host is empty, then we shouldn't even attempt the reparent. That tablet has already shut down.
+ if host == "" {
+ return vterrors.New(vtrpc.Code_FAILED_PRECONDITION, "Shard primary has empty mysql hostname")
+ }
+ if status.SourceHost != host || status.SourcePort != port {
+ // This handles both changing the address and starting replication.
if err := tm.MysqlDaemon.SetReplicationSource(ctx, host, port, wasReplicating, shouldbeReplicating); err != nil {
if err := tm.handleRelayLogError(err); err != nil {
return err
@@ -862,6 +913,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe
return StopReplicationAndGetStatusResponse{}, vterrors.Wrap(err, "before status failed")
}
before := mysql.ReplicationStatusToProto(rs)
+ before.BackupRunning = tm.IsBackupRunning()
if stopReplicationMode == replicationdatapb.StopReplicationMode_IOTHREADONLY {
if !rs.IOHealthy() {
@@ -908,6 +960,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe
}, vterrors.Wrap(err, "acquiring replication status failed")
}
after := mysql.ReplicationStatusToProto(rsAfter)
+ after.BackupRunning = tm.IsBackupRunning()
rs.Position = rsAfter.Position
rs.RelayLogPosition = rsAfter.RelayLogPosition
@@ -964,8 +1017,13 @@ func (tm *TabletManager) PromoteReplica(ctx context.Context, semiSync bool) (str
return "", err
}
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync)
+ if err != nil {
+ return "", err
+ }
+
// If using semi-sync, we need to enable it before going read-write.
- if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, convertBoolToSemiSyncAction(semiSync)); err != nil {
+ if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil {
return "", err
}
@@ -1051,7 +1109,7 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT
return nil
}
- //shouldAck := semiSync == SemiSyncActionSet
+ // shouldAck := semiSync == SemiSyncActionSet
shouldAck := isPrimaryEligible(tabletType)
acking, err := tm.MysqlDaemon.SemiSyncReplicationStatus()
if err != nil {
@@ -1072,11 +1130,17 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT
return nil
}
+// handleRelayLogError resets replication of the instance.
+// This is required because sometimes MySQL gets stuck due to improper initialization of
+// master info structure or related failures and throws errors like
+// ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log
+// These errors can only be resolved by resetting the replication, otherwise START SLAVE fails.
func (tm *TabletManager) handleRelayLogError(err error) error {
// attempt to fix this error:
// Slave failed to initialize relay log info structure from the repository (errno 1872) (sqlstate HY000) during query: START SLAVE
// see https://bugs.mysql.com/bug.php?id=83713 or https://github.com/vitessio/vitess/issues/5067
- if strings.Contains(err.Error(), "Slave failed to initialize relay log info structure from the repository") {
+ // The same fix also works for https://github.com/vitessio/vitess/issues/10955.
+ if strings.Contains(err.Error(), "Slave failed to initialize relay log info structure from the repository") || strings.Contains(err.Error(), "Could not initialize master info structure") {
// Stop, reset and start replication again to resolve this error
if err := tm.MysqlDaemon.RestartReplication(tm.hookExtraEnv()); err != nil {
return err
@@ -1089,6 +1153,15 @@ func (tm *TabletManager) handleRelayLogError(err error) error {
// repairReplication tries to connect this server to whoever is
// the current primary of the shard, and start replicating.
func (tm *TabletManager) repairReplication(ctx context.Context) error {
+ if tm._isSetReplicationSourceLockedRunning {
+ // we are actively setting replication source,
+ // repairReplication will block due to higher
+ // authority holding a shard lock (PRS on vtctld)
+ log.Infof("slack-debug: we are actively setting replication source, exiting")
+
+ return nil
+ }
+
tablet := tm.Tablet()
si, err := tm.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard)
diff --git a/go/vt/vttablet/tabletmanager/rpc_replication_test.go b/go/vt/vttablet/tabletmanager/rpc_replication_test.go
index 0aac8c971ec..d69a8414927 100644
--- a/go/vt/vttablet/tabletmanager/rpc_replication_test.go
+++ b/go/vt/vttablet/tabletmanager/rpc_replication_test.go
@@ -24,7 +24,7 @@ import (
"github.com/stretchr/testify/require"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/topo/memorytopo"
)
@@ -46,7 +46,7 @@ func TestPromoteReplicaReplicationManagerSuccess(t *testing.T) {
numTicksRan++
})
// Set the promotion lag to a second and then run PromoteReplica
- tm.MysqlDaemon.(*fakemysqldaemon.FakeMysqlDaemon).PromoteLag = time.Second
+ tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon).PromoteLag = time.Second
_, err := tm.PromoteReplica(ctx, false)
require.NoError(t, err)
// At the end we expect the replication manager to be stopped.
@@ -68,7 +68,7 @@ func TestPromoteReplicaReplicationManagerFailure(t *testing.T) {
require.True(t, tm.replManager.ticks.Running())
// Set the promotion lag to a second and then run PromoteReplica
- tm.MysqlDaemon.(*fakemysqldaemon.FakeMysqlDaemon).PromoteError = fmt.Errorf("promote error")
+ tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon).PromoteError = fmt.Errorf("promote error")
_, err := tm.PromoteReplica(ctx, false)
require.Error(t, err)
// At the end we expect the replication manager to be stopped.
diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go
index f3a3d12a1d2..af4a6451354 100644
--- a/go/vt/vttablet/tabletmanager/tm_init.go
+++ b/go/vt/vttablet/tabletmanager/tm_init.go
@@ -211,6 +211,8 @@ type TabletManager struct {
_lockTablesTimer *time.Timer
// _isBackupRunning tells us whether there is a backup that is currently running
_isBackupRunning bool
+ // _isSetReplicationSourceLockedRunning indicates we are actively running setReplicationSourceLocked
+ _isSetReplicationSourceLockedRunning bool
}
// BuildTabletFromInput builds a tablet record from input parameters.
@@ -769,7 +771,7 @@ func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) {
// restoreFromBackup will just be a regular action
// (same as if it was triggered remotely)
- if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime); err != nil {
+ if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreFromBackupAllowedEngines); err != nil {
log.Exitf("RestoreFromBackup failed: %v", err)
}
}()
@@ -909,13 +911,24 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t
}
// If using semi-sync, we need to enable it before connecting to primary.
// We should set the correct type, since it is used in replica semi-sync
+
tablet.Type = tabletType
- if err := tm.fixSemiSync(tabletType, convertBoolToSemiSyncAction(reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet))); err != nil {
+
+ semiSyncAction, err := tm.convertBoolToSemiSyncAction(reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet))
+ if err != nil {
+ return nil, err
+ }
+
+ if err := tm.fixSemiSync(tabletType, semiSyncAction); err != nil {
return nil, err
}
// Set primary and start replication.
- if err := tm.MysqlDaemon.SetReplicationSource(ctx, currentPrimary.Tablet.MysqlHostname, int(currentPrimary.Tablet.MysqlPort), false /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil {
+ if currentPrimary.Tablet.MysqlHostname == "" {
+ log.Warningf("primary tablet in the shard record doesn't have mysql hostname specified. probably because that tablet shutdown.")
+ return nil, nil
+ }
+ if err := tm.MysqlDaemon.SetReplicationSource(ctx, currentPrimary.Tablet.MysqlHostname, int(currentPrimary.Tablet.MysqlPort), true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil {
return nil, vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed")
}
diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go
index 933a9501a55..af93b6b73fd 100644
--- a/go/vt/vttablet/tabletmanager/tm_init_test.go
+++ b/go/vt/vttablet/tabletmanager/tm_init_test.go
@@ -33,7 +33,7 @@ import (
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
@@ -379,10 +379,10 @@ func TestCheckPrimaryShip(t *testing.T) {
tablet.Type = topodatapb.TabletType_REPLICA
tablet.PrimaryTermStartTime = nil
// Get the fakeMySQL and set it up to expect a set replication source command
- fakeMysql := tm.MysqlDaemon.(*fakemysqldaemon.FakeMysqlDaemon)
+ fakeMysql := tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon)
fakeMysql.SetReplicationSourceInputs = append(fakeMysql.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", otherTablet.MysqlHostname, otherTablet.MysqlPort))
fakeMysql.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -638,7 +638,7 @@ func TestGetBuildTags(t *testing.T) {
}
}
-func newTestMysqlDaemon(t *testing.T, port int32) *fakemysqldaemon.FakeMysqlDaemon {
+func newTestMysqlDaemon(t *testing.T, port int32) *mysqlctl.FakeMysqlDaemon {
t.Helper()
db := fakesqldb.New(t)
@@ -659,7 +659,7 @@ func newTestMysqlDaemon(t *testing.T, port int32) *fakemysqldaemon.FakeMysqlDaem
db.AddQueryPattern("UPDATE _vt\\.(local|shard)_metadata SET db_name='.+' WHERE db_name=''", &sqltypes.Result{})
db.AddQueryPattern("INSERT INTO _vt\\.local_metadata \\(.+\\) VALUES \\(.+\\) ON DUPLICATE KEY UPDATE value ?= ?'.+'.*", &sqltypes.Result{})
- mysqld := fakemysqldaemon.NewFakeMysqlDaemon(db)
+ mysqld := mysqlctl.NewFakeMysqlDaemon(db)
mysqld.MysqlPort = sync2.NewAtomicInt32(port)
return mysqld
diff --git a/go/vt/vttablet/tabletmanager/tm_state_test.go b/go/vt/vttablet/tabletmanager/tm_state_test.go
index 48e2123554f..537580d4853 100644
--- a/go/vt/vttablet/tabletmanager/tm_state_test.go
+++ b/go/vt/vttablet/tabletmanager/tm_state_test.go
@@ -28,7 +28,7 @@ import (
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/key"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/faketopo"
@@ -105,7 +105,7 @@ func TestStateDenyList(t *testing.T) {
tm := newTestTM(t, ts, 1, "ks", "0")
defer tm.Stop()
- fmd := tm.MysqlDaemon.(*fakemysqldaemon.FakeMysqlDaemon)
+ fmd := tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon)
fmd.Schema = &tabletmanagerdatapb.SchemaDefinition{
TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{
Name: "t1",
diff --git a/go/vt/vttablet/tabletmanager/vdiff/controller.go b/go/vt/vttablet/tabletmanager/vdiff/controller.go
index e3a362d3c6b..f3ee434edcd 100644
--- a/go/vt/vttablet/tabletmanager/vdiff/controller.go
+++ b/go/vt/vttablet/tabletmanager/vdiff/controller.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"strings"
+ "time"
"vitess.io/vitess/go/vt/proto/tabletmanagerdata"
"vitess.io/vitess/go/vt/vterrors"
@@ -127,18 +128,20 @@ func (ct *controller) run(ctx context.Context) {
row := qr.Named().Row()
state := VDiffState(strings.ToLower(row["state"].ToString()))
switch state {
- case PendingState:
- log.Infof("Starting vdiff %s", ct.uuid)
+ case PendingState, StartedState:
+ action := "Starting"
+ if state == StartedState {
+ action = "Restarting"
+ }
+ log.Infof("%s vdiff %s", action, ct.uuid)
if err := ct.start(ctx, dbClient); err != nil {
log.Errorf("Encountered an error for vdiff %s: %s", ct.uuid, err)
- insertVDiffLog(ctx, dbClient, ct.id, fmt.Sprintf("Error: %s", err))
- if err = ct.updateState(dbClient, ErrorState, err); err != nil {
- log.Errorf("Encountered an error marking vdiff %s as errored: %v", ct.uuid, err)
+ if err := ct.saveErrorState(ctx, err); err != nil {
+ log.Errorf("Unable to save error state for vdiff %s; giving up because %s", ct.uuid, err.Error())
}
- return
}
default:
- log.Infof("VDiff %s was not marked as pending, doing nothing", state)
+ log.Infof("VDiff %s was not marked as runnable (state: %s), doing nothing", ct.uuid, state)
}
}
@@ -271,3 +274,53 @@ func (ct *controller) validate() error {
// TODO: check if vreplication workflow has errors, what else?
return nil
}
+
+// saveErrorState saves the error state for the vdiff in the database.
+// It never gives up trying to save the error state, unless the context
+// has been cancelled or the done channel has been closed -- indicating
+// that the engine is closing or the vdiff has been explicitly stopped.
+// Note that when the engine is later opened the started vdiff will be
+// restarted even though we were unable to save the error state.
+// It uses exponential backoff with a factor of 1.5 to avoid creating
+// too many database connections.
+func (ct *controller) saveErrorState(ctx context.Context, saveErr error) error {
+ retryDelay := 100 * time.Millisecond
+ maxRetryDelay := 60 * time.Second
+ save := func() error {
+ dbClient := ct.vde.dbClientFactoryFiltered()
+ if err := dbClient.Connect(); err != nil {
+ return err
+ }
+ defer dbClient.Close()
+
+ if err := ct.updateState(dbClient, ErrorState, saveErr); err != nil {
+ return err
+ }
+ insertVDiffLog(ctx, dbClient, ct.id, fmt.Sprintf("Error: %s", saveErr))
+
+ return nil
+ }
+
+ for {
+ if err := save(); err != nil {
+ log.Warningf("Failed to persist vdiff error state: %v. Will retry in %s", err, retryDelay.String())
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("engine is shutting down")
+ case <-ct.done:
+ return fmt.Errorf("vdiff was stopped")
+ case <-time.After(retryDelay):
+ if retryDelay < maxRetryDelay {
+ retryDelay = time.Duration(float64(retryDelay) * 1.5)
+ if retryDelay > maxRetryDelay {
+ retryDelay = maxRetryDelay
+ }
+ }
+ continue
+ }
+ }
+
+ // Success
+ return nil
+ }
+}
diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go
index 80165ff3d87..73ed5d9deed 100644
--- a/go/vt/vttablet/tabletmanager/vdiff/engine.go
+++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go
@@ -111,8 +111,16 @@ func (vde *Engine) Open(ctx context.Context, vre *vreplication.Engine) {
}
func (vde *Engine) openLocked(ctx context.Context) error {
- // Start any pending VDiffs
- rows, err := vde.getPendingVDiffs(ctx)
+ // This should never happen
+ if len(vde.controllers) > 0 {
+ log.Warningf("VDiff Engine invalid state detected: %d controllers existed when opening; resetting state", len(vde.controllers))
+ vde.resetControllers()
+ }
+
+ // At this point the tablet has no controllers running. So
+ // we want to start any VDiffs that have not been explicitly
+ // stopped or otherwise finished.
+ rows, err := vde.getVDiffsToRun(ctx)
if err != nil {
return err
}
@@ -219,10 +227,7 @@ func (vde *Engine) Close() {
vde.cancel()
// We still have to wait for all controllers to stop.
- for _, ct := range vde.controllers {
- ct.Stop()
- }
- vde.controllers = make(map[int64]*controller)
+ vde.resetControllers()
// Wait for long-running functions to exit.
vde.wg.Wait()
@@ -232,14 +237,7 @@ func (vde *Engine) Close() {
log.Infof("VDiff Engine: closed")
}
-func (vde *Engine) getDBClient(isAdmin bool) binlogplayer.DBClient {
- if isAdmin {
- return vde.dbClientFactoryDba()
- }
- return vde.dbClientFactoryFiltered()
-}
-
-func (vde *Engine) getPendingVDiffs(ctx context.Context) (*sqltypes.Result, error) {
+func (vde *Engine) getVDiffsToRun(ctx context.Context) (*sqltypes.Result, error) {
dbClient := vde.dbClientFactoryFiltered()
if err := dbClient.Connect(); err != nil {
return nil, err
@@ -248,7 +246,7 @@ func (vde *Engine) getPendingVDiffs(ctx context.Context) (*sqltypes.Result, erro
// We have to use ExecIgnore here so as not to block quick tablet state
// transitions from primary to non-primary when starting the engine
- qr, err := withDDL.ExecIgnore(ctx, sqlGetPendingVDiffs, dbClient.ExecuteFetch)
+ qr, err := withDDL.ExecIgnore(ctx, sqlGetVDiffsToRun, dbClient.ExecuteFetch)
if err != nil {
return nil, err
}
@@ -343,3 +341,10 @@ func (vde *Engine) retryErroredVDiffs() {
}
}
}
+
+func (vde *Engine) resetControllers() {
+ for _, ct := range vde.controllers {
+ ct.Stop()
+ }
+ vde.controllers = make(map[int64]*controller)
+}
diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go
new file mode 100644
index 00000000000..9a7014c2ab3
--- /dev/null
+++ b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go
@@ -0,0 +1,252 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vdiff
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/binlog/binlogplayer"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+var (
+ wfName = "testwf"
+ optionsJS = `{"core_options": {"auto_retry": true}}`
+ vdiffTestCols = "id|vdiff_uuid|workflow|keyspace|shard|db_name|state|options|last_error"
+ vdiffTestColTypes = "int64|varchar|varbinary|varbinary|varchar|varbinary|varbinary|json|varbinary"
+ singleRowAffected = &sqltypes.Result{RowsAffected: 1}
+ noResults = &sqltypes.Result{}
+ testSchema = &tabletmanagerdatapb.SchemaDefinition{
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{
+ {
+ Name: "t1",
+ Columns: []string{"c1", "c2"},
+ PrimaryKeyColumns: []string{"c1"},
+ Fields: sqltypes.MakeTestFields("c1|c2", "int64|int64"),
+ },
+ },
+ }
+)
+
+func TestEngineOpen(t *testing.T) {
+ UUID := uuid.New().String()
+ source := `keyspace:"testsrc" shard:"0" filter:{rules:{match:"t1" filter:"select * from t1"}}`
+ tests := []struct {
+ name string
+ state VDiffState
+ }{
+ // This needs to be started, for the first time, on open
+ {
+ name: "pending vdiff",
+ state: PendingState,
+ },
+ // This needs to be restarted on open as it was previously started
+ // but was unable to terminate normally (e.g. crash) in the previous
+ // engine.
+ {
+ name: "started vdiff",
+ state: StartedState,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tablet := addTablet(100)
+ tablet.Type = topodatapb.TabletType_PRIMARY
+ defer deleteTablet(tablet)
+ resetBinlogClient()
+ dbClient := binlogplayer.NewMockDBClient(t)
+ dbClientFactory := func() binlogplayer.DBClient { return dbClient }
+ vde := &Engine{
+ controllers: make(map[int64]*controller),
+ ts: env.TopoServ,
+ thisTablet: tablet,
+ dbClientFactoryFiltered: dbClientFactory,
+ dbClientFactoryDba: dbClientFactory,
+ dbName: vdiffdb,
+ }
+ require.False(t, vde.IsOpen())
+
+ initialQR := sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ vdiffTestCols,
+ vdiffTestColTypes,
+ ),
+ fmt.Sprintf("1|%s|%s|%s|%s|%s|%s|%s|", UUID, wfName, env.KeyspaceName, env.ShardName, vdiffdb, tt.state, optionsJS),
+ )
+
+ dbClient.ExpectRequest("select * from _vt.vdiff where state in ('started','pending')", initialQR, nil)
+
+ dbClient.ExpectRequest("select * from _vt.vdiff where id = 1", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ vdiffTestCols,
+ vdiffTestColTypes,
+ ),
+ fmt.Sprintf("1|%s|%s|%s|%s|%s|%s|%s|", UUID, wfName, env.KeyspaceName, env.ShardName, vdiffdb, tt.state, optionsJS),
+ ), nil)
+
+ dbClient.ExpectRequest(fmt.Sprintf("select * from _vt.vreplication where workflow = '%s' and db_name = '%s'", wfName, vdiffdb), sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|workflow|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type",
+ "int64|varbinary|blob|varbinary|varbinary|int64|int64|varbinary|varbinary|int64|int64|varbinary|varbinary|varbinary|int64|varbinary|int64|int64|int64|varchar|int64",
+ ),
+ fmt.Sprintf("1|%s|%s|MySQL56/f69ed286-6909-11ed-8342-0a50724f3211:1-110||9223372036854775807|9223372036854775807||PRIMARY,REPLICA|1669511347|0|Running||%s|200||1669511347|1|0||1", wfName, source, vdiffdb),
+ ), nil)
+
+ dbClient.ExpectRequest("update _vt.vdiff set state = 'started', last_error = '' , started_at = utc_timestamp() where id = 1", singleRowAffected, nil)
+ dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'State changed to: started')", singleRowAffected, nil)
+ dbClient.ExpectRequest(`select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report
+ from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
+ where vdt.vdiff_id = 1 and vdt.table_name = 't1'`, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "lastpk|mismatch|report",
+ "varbinary|int64|json",
+ ),
+ `fields:{name:"c1" type:INT64 table:"t1" org_table:"t1" database:"vt_customer" org_name:"c1" column_length:20 charset:63 flags:53251} rows:{lengths:1 values:"1"}|0|{}`,
+ ), nil)
+ dbClient.ExpectRequest("select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = 'vdiff_test' and table_name in ('t1')", sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "table_name|table_rows",
+ "varchar|int64",
+ ),
+ "t1|1",
+ ), nil)
+ dbClient.ExpectRequest(`select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report
+ from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
+ where vdt.vdiff_id = 1 and vdt.table_name = 't1'`, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "lastpk|mismatch|report",
+ "varbinary|int64|json",
+ ),
+ `fields:{name:"c1" type:INT64 table:"t1" org_table:"t1" database:"vt_customer" org_name:"c1" column_length:20 charset:63 flags:53251} rows:{lengths:1 values:"1"}|0|{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 1, "MismatchedRows": 0, "ExtraRowsSource": 0, "ExtraRowsTarget": 0}`,
+ ), nil)
+
+ // Now let's short circuit the vdiff as we know that the open has worked as expected.
+ shortCircuitTestAfterQuery("update _vt.vdiff_table set table_rows = 1 where vdiff_id = 1 and table_name = 't1'", dbClient)
+
+ vde.Open(context.Background(), vreplEngine)
+ defer vde.Close()
+ assert.True(t, vde.IsOpen())
+ assert.Equal(t, 1, len(vde.controllers))
+ dbClient.Wait()
+ })
+ }
+}
+
+func TestEngineRetryErroredVDiffs(t *testing.T) {
+ UUID := uuid.New().String()
+ source := `keyspace:"testsrc" shard:"0" filter:{rules:{match:"t1" filter:"select * from t1"}}`
+ expectedControllerCnt := 0
+ tests := []struct {
+ name string
+ retryQueryResults *sqltypes.Result
+ expectRetry bool
+ }{
+ {
+ name: "nothing to retry",
+ retryQueryResults: noResults,
+ },
+ {
+ name: "non-ephemeral error",
+ retryQueryResults: sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ vdiffTestCols,
+ vdiffTestColTypes,
+ ),
+ fmt.Sprintf("1|%s|%s|%s|%s|%s|error|%s|%v", UUID, wfName, env.KeyspaceName, env.ShardName, vdiffdb, optionsJS,
+ mysql.NewSQLError(mysql.ERNoSuchTable, "42S02", "Table 'foo' doesn't exist")),
+ ),
+ },
+ {
+ name: "ephemeral error",
+ retryQueryResults: sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ vdiffTestCols,
+ vdiffTestColTypes,
+ ),
+ fmt.Sprintf("1|%s|%s|%s|%s|%s|error|%s|%v", UUID, wfName, env.KeyspaceName, env.ShardName, vdiffdb, optionsJS,
+ mysql.NewSQLError(mysql.ERLockWaitTimeout, "HY000", "Lock wait timeout exceeded; try restarting transaction")),
+ ),
+ expectRetry: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tablet := addTablet(100)
+ tablet.Type = topodatapb.TabletType_PRIMARY
+ defer deleteTablet(tablet)
+ resetBinlogClient()
+ dbClient := binlogplayer.NewMockDBClient(t)
+ dbClientFactory := func() binlogplayer.DBClient { return dbClient }
+ vde := &Engine{
+ controllers: make(map[int64]*controller),
+ ts: env.TopoServ,
+ thisTablet: tablet,
+ dbClientFactoryFiltered: dbClientFactory,
+ dbClientFactoryDba: dbClientFactory,
+ dbName: vdiffdb,
+ }
+ require.False(t, vde.IsOpen())
+
+ dbClient.ExpectRequest("select * from _vt.vdiff where state in ('started','pending')", noResults, nil)
+ vde.Open(context.Background(), vreplEngine)
+ defer vde.Close()
+ assert.True(t, vde.IsOpen())
+ assert.Equal(t, 0, len(vde.controllers))
+
+ dbClient.ExpectRequest("select * from _vt.vdiff where state = 'error' and options->>'$.core_options.auto_retry' = 'true'", tt.retryQueryResults, nil)
+ // Right now this only supports a single row as with multiple rows we have
+ // multiple controllers in separate goroutines and the order is not
+ // guaranteed. If we want to support multiple rows here then we'll need to
+ // switch to using the queryhistory package. That will also require building
+ // out that package to support MockDBClient and its Expect* functions
+ // (query+results+err) as right now it only supports a real DBClient and
+ // checks for query execution.
+ for _, row := range tt.retryQueryResults.Rows {
+ id := row[0].ToString()
+ if tt.expectRetry {
+ dbClient.ExpectRequestRE("update _vt.vdiff as vd left join _vt.vdiff_table as vdt on \\(vd.id = vdt.vdiff_id\\) set vd.state = 'pending'.*", singleRowAffected, nil)
+ dbClient.ExpectRequest(fmt.Sprintf("select * from _vt.vdiff where id = %s", id), sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ vdiffTestCols,
+ vdiffTestColTypes,
+ ),
+ fmt.Sprintf("%s|%s|%s|%s|%s|%s|pending|%s|", id, UUID, wfName, env.KeyspaceName, env.ShardName, vdiffdb, optionsJS),
+ ), nil)
+ dbClient.ExpectRequest(fmt.Sprintf("select * from _vt.vreplication where workflow = '%s' and db_name = '%s'", wfName, vdiffdb), sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|workflow|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type",
+ "int64|varbinary|blob|varbinary|varbinary|int64|int64|varbinary|varbinary|int64|int64|varbinary|varbinary|varbinary|int64|varbinary|int64|int64|int64|varchar|int64",
+ ),
+ fmt.Sprintf("%s|%s|%s|MySQL56/f69ed286-6909-11ed-8342-0a50724f3211:1-110||9223372036854775807|9223372036854775807||PRIMARY,REPLICA|1669511347|0|Running||%s|200||1669511347|1|0||1", id, wfName, source, vdiffdb),
+ ), nil)
+
+ // At this point we know that we kicked off the expected retry so we can short circuit the vdiff.
+ shortCircuitTestAfterQuery(fmt.Sprintf("update _vt.vdiff set state = 'started', last_error = '' , started_at = utc_timestamp() where id = %s", id), dbClient)
+
+ expectedControllerCnt++
+ }
+ }
+
+ err := vde.retryVDiffs(vde.ctx)
+ assert.NoError(t, err)
+ assert.Equal(t, expectedControllerCnt, len(vde.controllers))
+ dbClient.Wait()
+ })
+ }
+
+}
diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go
new file mode 100644
index 00000000000..80fb1f4c443
--- /dev/null
+++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go
@@ -0,0 +1,417 @@
+/*
+Copyright 2022 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vdiff
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/binlog/binlogplayer"
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "vitess.io/vitess/go/vt/vttablet/queryservice/fakes"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+ "vitess.io/vitess/go/vt/vttablet/tabletconntest"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+ "vitess.io/vitess/go/vt/vttablet/tmclienttest"
+ "vitess.io/vitess/go/vt/withddl"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+var (
+ vstreamerEngine *vstreamer.Engine
+ vreplEngine *vreplication.Engine
+ env *testenv.Env
+ tmc = newFakeTMClient()
+ globalFBC = &fakeBinlogClient{}
+ globalDBQueries = make(chan string, 1000)
+ vdiffdb = "vdiff_test"
+ doNotLogDBQueries = false
+)
+
+type LogExpectation struct {
+ Type string
+ Detail string
+}
+
+func init() {
+ tabletconn.RegisterDialer("test", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ return &fakeTabletConn{
+ QueryService: fakes.ErrorQueryService,
+ tablet: tablet,
+ }, nil
+ })
+ tabletconntest.SetProtocol("go.vt.vttablet.tabletmanager.vdiff.framework_test", "test")
+
+ binlogplayer.RegisterClientFactory("test", func() binlogplayer.Client { return globalFBC })
+
+ tmclient.RegisterTabletManagerClientFactory("test", func() tmclient.TabletManagerClient { return tmc })
+ tmclienttest.SetProtocol("go.vt.vttablet.tabletmanager.vdiff.framework_test", "test")
+}
+
+func TestMain(m *testing.M) {
+ binlogplayer.SetProtocol("vdiff_test_framework", "test")
+ exitCode := func() int {
+ var err error
+ env, err = testenv.Init()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v", err)
+ return 1
+ }
+ defer env.Close()
+
+ vstreamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0])
+ vstreamerEngine.InitDBConfig(env.KeyspaceName, env.ShardName)
+ vstreamerEngine.Open()
+ defer vstreamerEngine.Close()
+
+ ddls := binlogplayer.CreateVReplicationTable()
+ ddls = append(ddls, binlogplayer.AlterVReplicationTable...)
+ ddls = append(ddls, withDDL.DDLs()...)
+ ddls = append(ddls, fmt.Sprintf("create database %s", vdiffdb))
+
+ for _, ddl := range ddls {
+ if err := env.Mysqld.ExecuteSuperQuery(context.Background(), ddl); err != nil {
+ fmt.Fprintf(os.Stderr, "%v", err)
+ }
+ }
+
+ vreplEngine = vreplication.NewTestEngine(env.TopoServ, env.Cells[0], env.Mysqld, realDBClientFactory, realDBClientFactory, vdiffdb, nil)
+ vreplEngine.Open(context.Background())
+ defer vreplEngine.Close()
+
+ tmc.schema = testSchema
+
+ return m.Run()
+ }()
+ os.Exit(exitCode)
+}
+
+func resetBinlogClient() {
+ globalFBC = &fakeBinlogClient{}
+}
+
+// shortCircuitTestAfterQuery is used to short circuit a test after a specific query is executed.
+// This can be used to end a vdiff, by returning an error from the specified query, once the test
+// has verified the necessary behavior.
+func shortCircuitTestAfterQuery(query string, dbClient *binlogplayer.MockDBClient) {
+ dbClient.ExpectRequest(query, singleRowAffected, fmt.Errorf("Short circuiting test"))
+ dbClient.ExpectRequest("update _vt.vdiff set state = 'error', last_error = 'Short circuiting test' where id = 1", singleRowAffected, nil)
+ dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'State changed to: error')", singleRowAffected, nil)
+ dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'Error: Short circuiting test')", singleRowAffected, nil)
+}
+
+//--------------------------------------
+// Topos and tablets
+
+func addTablet(id int) *topodatapb.Tablet {
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: env.Cells[0],
+ Uid: uint32(id),
+ },
+ Keyspace: env.KeyspaceName,
+ Shard: env.ShardName,
+ KeyRange: &topodatapb.KeyRange{},
+ Type: topodatapb.TabletType_REPLICA,
+ PortMap: map[string]int32{
+ "test": int32(id),
+ },
+ }
+ if err := env.TopoServ.CreateTablet(context.Background(), tablet); err != nil {
+ panic(err)
+ }
+ env.SchemaEngine.Reload(context.Background())
+ return tablet
+}
+
+func deleteTablet(tablet *topodatapb.Tablet) {
+ env.TopoServ.DeleteTablet(context.Background(), tablet.Alias)
+ // This is not automatically removed from shard replication, which results in log spam.
+ topo.DeleteTabletReplicationData(context.Background(), env.TopoServ, tablet)
+ env.SchemaEngine.Reload(context.Background())
+}
+
+// fakeTabletConn implements the TabletConn interface. We only care about the
+// health check part. The state reported by the tablet will depend
+// on the Tag values "serving" and "healthy".
+type fakeTabletConn struct {
+ queryservice.QueryService
+ tablet *topodatapb.Tablet
+}
+
+// StreamHealth is part of queryservice.QueryService.
+func (ftc *fakeTabletConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error {
+ return callback(&querypb.StreamHealthResponse{
+ Serving: true,
+ Target: &querypb.Target{
+ Keyspace: ftc.tablet.Keyspace,
+ Shard: ftc.tablet.Shard,
+ TabletType: ftc.tablet.Type,
+ },
+ RealtimeStats: &querypb.RealtimeStats{},
+ })
+}
+
+// vstreamHook allows you to do work just before calling VStream.
+var vstreamHook func(ctx context.Context)
+
+// VStream directly calls into the pre-initialized engine.
+func (ftc *fakeTabletConn) VStream(ctx context.Context, request *binlogdatapb.VStreamRequest, send func([]*binlogdatapb.VEvent) error) error {
+ if request.Target.Keyspace != "vttest" {
+ <-ctx.Done()
+ return io.EOF
+ }
+ if vstreamHook != nil {
+ vstreamHook(ctx)
+ }
+ return vstreamerEngine.Stream(ctx, request.Position, request.TableLastPKs, request.Filter, send)
+}
+
+// vstreamRowsHook allows you to do work just before calling VStreamRows.
+var vstreamRowsHook func(ctx context.Context)
+
+// vstreamRowsSendHook allows you to do work just before VStreamRows calls send.
+var vstreamRowsSendHook func(ctx context.Context)
+
+// VStreamRows directly calls into the pre-initialized engine.
+func (ftc *fakeTabletConn) VStreamRows(ctx context.Context, request *binlogdatapb.VStreamRowsRequest, send func(*binlogdatapb.VStreamRowsResponse) error) error {
+ if vstreamRowsHook != nil {
+ vstreamRowsHook(ctx)
+ }
+ var row []sqltypes.Value
+ if request.Lastpk != nil {
+ r := sqltypes.Proto3ToResult(request.Lastpk)
+ if len(r.Rows) != 1 {
+ return fmt.Errorf("unexpected lastpk input: %v", request.Lastpk)
+ }
+ row = r.Rows[0]
+ }
+ return vstreamerEngine.StreamRows(ctx, request.Query, row, func(rows *binlogdatapb.VStreamRowsResponse) error {
+ if vstreamRowsSendHook != nil {
+ vstreamRowsSendHook(ctx)
+ }
+ return send(rows)
+ })
+}
+
+//--------------------------------------
+// Binlog Client to TabletManager
+
+// fakeBinlogClient satisfies binlogplayer.Client.
+// Not to be used concurrently.
+type fakeBinlogClient struct {
+ lastTablet *topodatapb.Tablet
+ lastPos string
+ lastTables []string
+ lastKeyRange *topodatapb.KeyRange
+ lastCharset *binlogdatapb.Charset
+}
+
+func (fbc *fakeBinlogClient) Dial(tablet *topodatapb.Tablet) error {
+ fbc.lastTablet = tablet
+ return nil
+}
+
+func (fbc *fakeBinlogClient) Close() {
+}
+
+func (fbc *fakeBinlogClient) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset) (binlogplayer.BinlogTransactionStream, error) {
+ fbc.lastPos = position
+ fbc.lastTables = tables
+ fbc.lastCharset = charset
+ return &btStream{ctx: ctx}, nil
+}
+
+func (fbc *fakeBinlogClient) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset) (binlogplayer.BinlogTransactionStream, error) {
+ fbc.lastPos = position
+ fbc.lastKeyRange = keyRange
+ fbc.lastCharset = charset
+ return &btStream{ctx: ctx}, nil
+}
+
+// btStream satisfies binlogplayer.BinlogTransactionStream
+type btStream struct {
+ ctx context.Context
+ sent bool
+}
+
+func (bts *btStream) Recv() (*binlogdatapb.BinlogTransaction, error) {
+ if !bts.sent {
+ bts.sent = true
+ return &binlogdatapb.BinlogTransaction{
+ Statements: []*binlogdatapb.BinlogTransaction_Statement{
+ {
+ Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT,
+ Sql: []byte("insert into t values(1)"),
+ },
+ },
+ EventToken: &querypb.EventToken{
+ Timestamp: 72,
+ Position: "MariaDB/0-1-1235",
+ },
+ }, nil
+ }
+ <-bts.ctx.Done()
+ return nil, bts.ctx.Err()
+}
+
+//--------------------------------------
+// DBClient wrapper
+
+func realDBClientFactory() binlogplayer.DBClient {
+ return &realDBClient{}
+}
+
+type realDBClient struct {
+ conn *mysql.Conn
+ nolog bool
+}
+
+func (dbc *realDBClient) DBName() string {
+ return vdiffdb
+}
+
+func (dbc *realDBClient) Connect() error {
+ app, err := env.Dbcfgs.AppWithDB().MysqlParams()
+ if err != nil {
+ return err
+ }
+ app.DbName = vdiffdb
+ conn, err := mysql.Connect(context.Background(), app)
+ if err != nil {
+ return err
+ }
+ dbc.conn = conn
+ return nil
+}
+
+func (dbc *realDBClient) Begin() error {
+ _, err := dbc.ExecuteFetch("begin", 10000)
+ return err
+}
+
+func (dbc *realDBClient) Commit() error {
+ _, err := dbc.ExecuteFetch("commit", 10000)
+ return err
+}
+
+func (dbc *realDBClient) Rollback() error {
+ _, err := dbc.ExecuteFetch("rollback", 10000)
+ return err
+}
+
+func (dbc *realDBClient) Close() {
+ dbc.conn.Close()
+}
+
+func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) {
+ // Use Clone() because the contents of memory region referenced by
+ // string can change when clients (e.g. vcopier) use unsafe string methods.
+ query = strings.Clone(query)
+ if strings.HasPrefix(query, "use") ||
+ query == withddl.QueryToTriggerWithDDL { // this query breaks unit tests since it errors out
+ return nil, nil
+ }
+ qr, err := dbc.conn.ExecuteFetch(query, 10000, true)
+ if doNotLogDBQueries {
+ return qr, err
+ }
+ if !strings.HasPrefix(query, "select") && !strings.HasPrefix(query, "set") && !dbc.nolog {
+ globalDBQueries <- query
+ }
+ return qr, err
+}
+
+//----------------------------------------------
+// fakeTMClient
+
+type fakeTMClient struct {
+ tmclient.TabletManagerClient
+ schema *tabletmanagerdatapb.SchemaDefinition
+ vrQueries map[int]map[string]*querypb.QueryResult
+ waitpos map[int]string
+ vrpos map[int]string
+ pos map[int]string
+}
+
+func newFakeTMClient() *fakeTMClient {
+ return &fakeTMClient{
+ vrQueries: make(map[int]map[string]*querypb.QueryResult),
+ waitpos: make(map[int]string),
+ vrpos: make(map[int]string),
+ pos: make(map[int]string),
+ }
+}
+
+func (tmc *fakeTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) {
+ return tmc.schema, nil
+}
+
+func (tmc *fakeTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) {
+ result, ok := tmc.vrQueries[int(tablet.Alias.Uid)][query]
+ if !ok {
+ return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid)
+ }
+ return result, nil
+}
+
+func (tmc *fakeTMClient) WaitForPosition(ctx context.Context, tablet *topodatapb.Tablet, pos string) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if pos != tmc.waitpos[int(tablet.Alias.Uid)] {
+ return fmt.Errorf("waitpos %s not reached for tablet %d", pos, tablet.Alias.Uid)
+ }
+ return nil
+}
+
+func (tmc *fakeTMClient) VReplicationWaitForPos(ctx context.Context, tablet *topodatapb.Tablet, id int, pos string) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if pos != tmc.vrpos[int(tablet.Alias.Uid)] {
+ return fmt.Errorf("vrpos %s not reached for tablet %d", pos, tablet.Alias.Uid)
+ }
+ return nil
+}
+
+func (tmc *fakeTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) {
+ pos, ok := tmc.pos[int(tablet.Alias.Uid)]
+ if !ok {
+ return "", fmt.Errorf("no primary position for %d", tablet.Alias.Uid)
+ }
+ return pos, nil
+}
diff --git a/go/vt/vttablet/tabletmanager/vdiff/schema.go b/go/vt/vttablet/tabletmanager/vdiff/schema.go
index a724e474a44..e272a6ac74b 100644
--- a/go/vt/vttablet/tabletmanager/vdiff/schema.go
+++ b/go/vt/vttablet/tabletmanager/vdiff/schema.go
@@ -54,7 +54,7 @@ const (
db_name varbinary(1024),
state varbinary(1024),
options json,
- created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ created_at timestamp DEFAULT CURRENT_TIMESTAMP,
started_timestamp timestamp NULL DEFAULT NULL,
liveness_timestamp timestamp NULL DEFAULT NULL,
completed_timestamp timestamp NULL DEFAULT NULL,
@@ -70,8 +70,8 @@ const (
rows_compared int not null default 0,
mismatch bool not null default false,
report json,
- created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ created_at timestamp DEFAULT CURRENT_TIMESTAMP,
+ updated_at timestamp DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
primary key (vdiff_id, table_name)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4`
sqlCreateVDiffLogTable = `CREATE TABLE IF NOT EXISTS _vt.vdiff_log (
@@ -85,32 +85,31 @@ const (
sqlResumeVDiff = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.options = %s, vd.started_at = NULL, vd.completed_at = NULL, vd.state = 'pending',
vdt.state = 'pending' where vd.vdiff_uuid = %s and vd.id = vdt.vdiff_id and vd.state in ('completed', 'stopped')
and vdt.state in ('completed', 'stopped')`
- sqlRetryVDiff = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.state = 'pending', vd.last_error = '', vdt.state = 'pending'
- where vd.id = %d and vd.id = vdt.vdiff_id and vd.state = 'error' and vdt.state = 'error'`
+ sqlRetryVDiff = `update _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) set vd.state = 'pending',
+ vd.last_error = '', vdt.state = 'pending' where vd.id = %d and (vd.state = 'error' or vdt.state = 'error')`
sqlGetVDiffByKeyspaceWorkflowUUID = "select * from _vt.vdiff where keyspace = %s and workflow = %s and vdiff_uuid = %s"
sqlGetMostRecentVDiff = "select * from _vt.vdiff where keyspace = %s and workflow = %s order by id desc limit 1"
sqlGetVDiffByID = "select * from _vt.vdiff where id = %d"
- sqlDeleteVDiffs = `delete from vd, vdt, vdl using _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
- inner join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id)
+ sqlDeleteVDiffs = `delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
+ left join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id)
where vd.keyspace = %s and vd.workflow = %s`
- sqlDeleteVDiffByUUID = `delete from vd, vdt using _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
+ sqlDeleteVDiffByUUID = `delete from vd, vdt using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
and vd.vdiff_uuid = %s`
sqlVDiffSummary = `select vd.state as vdiff_state, vd.last_error as last_error, vdt.table_name as table_name,
vd.vdiff_uuid as 'uuid', vdt.state as table_state, vdt.table_rows as table_rows,
vd.started_at as started_at, vdt.table_rows as table_rows, vdt.rows_compared as rows_compared,
vd.completed_at as completed_at, IF(vdt.mismatch = 1, 1, 0) as has_mismatch, vdt.report as report
- from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
- where vdt.vdiff_id = %d`
+ from _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id)
+ where vd.id = %d`
// sqlUpdateVDiffState has a penultimate placeholder for any additional columns you want to update, e.g. `, foo = 1`
sqlUpdateVDiffState = "update _vt.vdiff set state = %s, last_error = %s %s where id = %d"
sqlUpdateVDiffStopped = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.state = 'stopped', vdt.state = 'stopped', vd.last_error = ''
where vd.id = vdt.vdiff_id and vd.id = %d and vd.state != 'completed'`
sqlGetVReplicationEntry = "select * from _vt.vreplication %s"
- sqlGetPendingVDiffs = "select * from _vt.vdiff where state = 'pending'"
+ sqlGetVDiffsToRun = "select * from _vt.vdiff where state in ('started','pending')" // what VDiffs have not been stopped or completed
sqlGetVDiffsToRetry = "select * from _vt.vdiff where state = 'error' and options->>'$.core_options.auto_retry' = 'true'"
sqlGetVDiffID = "select id as id from _vt.vdiff where vdiff_uuid = %s"
sqlGetAllVDiffs = "select * from _vt.vdiff order by id desc"
- sqlGetTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name = %s"
sqlGetAllTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name in (%s)"
sqlNewVDiffTable = "insert into _vt.vdiff_table(vdiff_id, table_name, state, table_rows) values(%d, %s, 'pending', %d)"
diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go
index 2617eb83831..bc3335e305b 100644
--- a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go
+++ b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go
@@ -218,7 +218,7 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri
go func() {
defer wg.Done()
err1 = td.forEachSource(func(source *migrationSource) error {
- tablet, err := pickTablet(ctx, sourceTopoServer, cell, ct.sourceKeyspace, source.shard, tabletTypes)
+ tablet, err := pickTablet(ctx, sourceTopoServer, cell, ct.vde.thisTablet.Alias.Cell, ct.sourceKeyspace, source.shard, tabletTypes)
if err != nil {
return err
}
@@ -230,7 +230,7 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri
wg.Add(1)
go func() {
defer wg.Done()
- tablet, err2 := pickTablet(ctx, ct.ts, td.wd.opts.PickerOptions.TargetCell, ct.vde.thisTablet.Keyspace,
+ tablet, err2 := pickTablet(ctx, ct.ts, td.wd.opts.PickerOptions.TargetCell, ct.vde.thisTablet.Alias.Cell, ct.vde.thisTablet.Keyspace,
ct.vde.thisTablet.Shard, td.wd.opts.PickerOptions.TabletTypes)
if err2 != nil {
return
@@ -248,8 +248,8 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri
return err2
}
-func pickTablet(ctx context.Context, ts *topo.Server, cell, keyspace, shard, tabletTypes string) (*topodata.Tablet, error) {
- tp, err := discovery.NewTabletPicker(ts, []string{cell}, keyspace, shard, tabletTypes)
+func pickTablet(ctx context.Context, ts *topo.Server, cell, localCell, keyspace, shard, tabletTypes string) (*topodata.Tablet, error) {
+ tp, err := discovery.NewTabletPicker(ctx, ts, []string{cell}, localCell, keyspace, shard, tabletTypes, discovery.TabletPickerOptions{})
if err != nil {
return nil, err
}
@@ -671,24 +671,6 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D
return nil
}
-func (td *tableDiffer) updateTableRows(ctx context.Context, dbClient binlogplayer.DBClient) error {
- query := fmt.Sprintf(sqlGetTableRows, encodeString(td.wd.ct.vde.dbName), encodeString(td.table.Name))
- qr, err := dbClient.ExecuteFetch(query, 1)
- if err != nil {
- return err
- }
- if len(qr.Rows) == 0 {
- return fmt.Errorf("no information_schema status found for table %s on tablet %v",
- td.table.Name, td.wd.ct.vde.thisTablet.Alias)
- }
- row := qr.Named().Row()
- query = fmt.Sprintf(sqlUpdateTableRows, row.AsInt64("table_rows", 0), td.wd.ct.id, encodeString(td.table.Name))
- if _, err := dbClient.ExecuteFetch(query, 1); err != nil {
- return err
- }
- return nil
-}
-
func (td *tableDiffer) updateTableState(ctx context.Context, dbClient binlogplayer.DBClient, state VDiffState) error {
query := fmt.Sprintf(sqlUpdateTableState, encodeString(string(state)), td.wd.ct.id, encodeString(td.table.Name))
if _, err := dbClient.ExecuteFetch(query, 1); err != nil {
diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go
index 7425427ac53..a28804a74c2 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/controller.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go
@@ -28,8 +28,8 @@ import (
"vitess.io/vitess/go/vt/vterrors"
"context"
+ "sync/atomic"
- "vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/tb"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/log"
@@ -40,6 +40,13 @@ import (
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
+const (
+ // How many times to retry tablet selection before we
+ // give up and return an error message that the user
+ // can see and act upon if needed.
+ tabletPickerRetries = 5
+)
+
// controller is created by Engine. Members are initialized upfront.
// There is no mutex within a controller becaust its members are
// either read-only or self-synchronized.
@@ -59,9 +66,9 @@ type controller struct {
done chan struct{}
// The following fields are updated after start. So, they need synchronization.
- sourceTablet sync2.AtomicString
+ sourceTablet atomic.Value
- lastWorkflowError *lastError
+ lastWorkflowError *vterrors.LastError
}
// newController creates a new controller. Unless a stream is explicitly 'Stopped',
@@ -72,14 +79,14 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor
}
ct := &controller{
- vre: vre,
- dbClientFactory: dbClientFactory,
- mysqld: mysqld,
- blpStats: blpStats,
- done: make(chan struct{}),
- source: &binlogdatapb.BinlogSource{},
- lastWorkflowError: newLastError("VReplication Controller", maxTimeToRetryError),
+ vre: vre,
+ dbClientFactory: dbClientFactory,
+ mysqld: mysqld,
+ blpStats: blpStats,
+ done: make(chan struct{}),
+ source: &binlogdatapb.BinlogSource{},
}
+ ct.sourceTablet.Store(&topodatapb.TabletAlias{})
log.Infof("creating controller with cell: %v, tabletTypes: %v, and params: %v", cell, tabletTypesStr, params)
// id
@@ -89,6 +96,7 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor
}
ct.id = uint32(id)
ct.workflow = params["workflow"]
+ ct.lastWorkflowError = vterrors.NewLastError(fmt.Sprintf("VReplication controller %d for workflow %q", ct.id, ct.workflow), maxTimeToRetryError)
state := params["state"]
blpStats.State.Set(state)
@@ -123,7 +131,7 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor
return nil, err
}
}
- tp, err := discovery.NewTabletPicker(sourceTopo, cells, ct.source.Keyspace, ct.source.Shard, tabletTypesStr)
+ tp, err := discovery.NewTabletPicker(ctx, sourceTopo, cells, ct.vre.cell, ct.source.Keyspace, ct.source.Shard, tabletTypesStr, discovery.TabletPickerOptions{})
if err != nil {
return nil, err
}
@@ -173,7 +181,7 @@ func (ct *controller) run(ctx context.Context) {
func (ct *controller) runBlp(ctx context.Context) (err error) {
defer func() {
- ct.sourceTablet.Set("")
+ ct.sourceTablet.Store(&topodatapb.TabletAlias{})
if x := recover(); x != nil {
log.Errorf("stream %v: caught panic: %v\n%s", ct.id, x, tb.Stack(4))
err = fmt.Errorf("panic: %v", x)
@@ -203,23 +211,11 @@ func (ct *controller) runBlp(ctx context.Context) (err error) {
}
defer dbClient.Close()
- var tablet *topodatapb.Tablet
- if ct.source.GetExternalMysql() == "" {
- log.Infof("trying to find a tablet eligible for vreplication. stream id: %v", ct.id)
- tablet, err = ct.tabletPicker.PickForStreaming(ctx)
- if err != nil {
- select {
- case <-ctx.Done():
- default:
- ct.blpStats.ErrorCounts.Add([]string{"No Source Tablet Found"}, 1)
- ct.setMessage(dbClient, fmt.Sprintf("Error picking tablet: %s", err.Error()))
- }
- return err
- }
- ct.setMessage(dbClient, fmt.Sprintf("Picked source tablet: %s", tablet.Alias.String()))
- log.Infof("found a tablet eligible for vreplication. stream id: %v tablet: %s", ct.id, tablet.Alias.String())
- ct.sourceTablet.Set(tablet.Alias.String())
+ tablet, err := ct.pickSourceTablet(ctx, dbClient)
+ if err != nil {
+ return err
}
+
switch {
case len(ct.source.Tables) > 0:
// Table names can have search patterns. Resolve them against the schema.
@@ -267,10 +263,12 @@ func (ct *controller) runBlp(ctx context.Context) (err error) {
vr := newVReplicator(ct.id, ct.source, vsClient, ct.blpStats, dbClient, ct.mysqld, ct.vre)
err = vr.Replicate(ctx)
- ct.lastWorkflowError.record(err)
+ ct.lastWorkflowError.Record(err)
+
// If this is a mysql error that we know needs manual intervention OR
- // we cannot identify this as non-recoverable, but it has persisted beyond the retry limit (maxTimeToRetryError)
- if isUnrecoverableError(err) || !ct.lastWorkflowError.shouldRetry() {
+ // we cannot identify this as non-recoverable, but it has persisted
+ // beyond the retry limit (maxTimeToRetryError).
+ if isUnrecoverableError(err) || !ct.lastWorkflowError.ShouldRetry() {
log.Errorf("vreplication stream %d going into error state due to %+v", ct.id, err)
if errSetState := vr.setState(binlogplayer.BlpError, err.Error()); errSetState != nil {
return err // yes, err and not errSetState.
@@ -294,6 +292,35 @@ func (ct *controller) setMessage(dbClient binlogplayer.DBClient, message string)
}
return nil
}
+
+// pickSourceTablet picks a healthy serving tablet to source for
+// the vreplication stream. If the source is marked as external, it
+// returns nil.
+func (ct *controller) pickSourceTablet(ctx context.Context, dbClient binlogplayer.DBClient) (*topodatapb.Tablet, error) {
+ if ct.source.GetExternalMysql() != "" {
+ return nil, nil
+ }
+ log.Infof("Trying to find an eligible source tablet for vreplication stream id %d for workflow: %s",
+ ct.id, ct.workflow)
+ tpCtx, tpCancel := context.WithTimeout(ctx, discovery.GetTabletPickerRetryDelay()*tabletPickerRetries)
+ defer tpCancel()
+ tablet, err := ct.tabletPicker.PickForStreaming(tpCtx)
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ default:
+ ct.blpStats.ErrorCounts.Add([]string{"No Source Tablet Found"}, 1)
+ ct.setMessage(dbClient, fmt.Sprintf("Error picking tablet: %s", err.Error()))
+ }
+ return tablet, err
+ }
+ ct.setMessage(dbClient, fmt.Sprintf("Picked source tablet: %s", tablet.Alias.String()))
+ log.Infof("Found eligible source tablet %s for vreplication stream id %d for workflow %s",
+ tablet.Alias.String(), ct.id, ct.workflow)
+ ct.sourceTablet.Store(tablet.Alias)
+ return tablet, err
+}
+
func (ct *controller) Stop() {
ct.cancel()
<-ct.done
diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go
index c73f6be56ae..4886d595d10 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go
@@ -23,12 +23,12 @@ import (
"testing"
"time"
+ "vitess.io/vitess/go/vt/mysqlctl"
querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
"vitess.io/vitess/go/vt/mysqlctl/tmutils"
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
@@ -87,9 +87,10 @@ func TestControllerKeyRange(t *testing.T) {
dbClient.ExpectRequest("commit", nil, nil)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
- ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, nil)
+ ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre)
if err != nil {
t.Fatal(err)
}
@@ -123,7 +124,7 @@ func TestControllerTables(t *testing.T) {
dbClient.ExpectRequest("commit", nil, nil)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{
+ mysqld := &mysqlctl.FakeMysqlDaemon{
MysqlPort: sync2.NewAtomicInt32(3306),
Schema: &tabletmanagerdatapb.SchemaDefinition{
DatabaseSchema: "",
@@ -149,8 +150,10 @@ func TestControllerTables(t *testing.T) {
},
},
}
+ mysqld.MysqlPort.Set(3306)
+ vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
- ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, nil)
+ ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre)
if err != nil {
t.Fatal(err)
}
@@ -216,9 +219,10 @@ func TestControllerOverrides(t *testing.T) {
dbClient.ExpectRequest("commit", nil, nil)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
- ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, nil)
+ ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre)
if err != nil {
t.Fatal(err)
}
@@ -232,7 +236,8 @@ func TestControllerOverrides(t *testing.T) {
}
func TestControllerCanceledContext(t *testing.T) {
- defer deleteTablet(addTablet(100))
+ wantTablet := addTablet(100)
+ defer deleteTablet(wantTablet)
params := map[string]string{
"id": "1",
@@ -242,7 +247,9 @@ func TestControllerCanceledContext(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()
- ct, err := newController(ctx, params, nil, nil, env.TopoServ, env.Cells[0], "rdonly", nil, nil)
+ vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, nil, nil, nil, "", nil)
+
+ ct, err := newController(ctx, params, nil, nil, env.TopoServ, env.Cells[0], "rdonly", nil, vre)
if err != nil {
t.Fatal(err)
}
@@ -284,9 +291,10 @@ func TestControllerRetry(t *testing.T) {
dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil)
dbClient.ExpectRequest("commit", nil, nil)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ vre := NewTestEngine(nil, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
- ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, nil)
+ ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre)
if err != nil {
t.Fatal(err)
}
@@ -342,9 +350,10 @@ func TestControllerStopPosition(t *testing.T) {
dbClient.ExpectRequest("update _vt.vreplication set state='Stopped', message='Reached stopping position, done playing logs' where id=1", testDMLResponse, nil)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
- ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, nil)
+ ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre)
if err != nil {
t.Fatal(err)
}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go
index afce87aa630..ef0d1376857 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/engine.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go
@@ -739,7 +739,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int, pos string) error {
return fmt.Errorf("unexpected result: %v", qr)
}
- // When err is not nil then we got a retryable error and will loop again
+ // When err is not nil then we got a retryable error and will loop again.
if err == nil {
current, dcerr := binlogplayer.DecodePosition(qr.Rows[0][0].ToString())
if dcerr != nil {
diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go
index 4c57f21dca1..0b61d732ce9 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go
@@ -32,7 +32,7 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
)
func TestEngineOpen(t *testing.T) {
@@ -42,7 +42,7 @@ func TestEngineOpen(t *testing.T) {
resetBinlogClient()
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
require.False(t, vre.IsOpen())
@@ -82,7 +82,7 @@ func TestEngineOpenRetry(t *testing.T) {
resetBinlogClient()
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
@@ -143,7 +143,7 @@ func TestEngineExec(t *testing.T) {
resetBinlogClient()
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
// Test Insert
@@ -305,7 +305,7 @@ func TestEngineBadInsert(t *testing.T) {
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
@@ -333,7 +333,7 @@ func TestEngineSelect(t *testing.T) {
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
@@ -366,7 +366,7 @@ func TestWaitForPos(t *testing.T) {
waitRetryTime = 10 * time.Millisecond
dbClient := binlogplayer.NewMockDBClient(t)
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
@@ -394,7 +394,7 @@ func TestWaitForPos(t *testing.T) {
func TestWaitForPosError(t *testing.T) {
dbClient := binlogplayer.NewMockDBClient(t)
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
@@ -430,7 +430,7 @@ func TestWaitForPosError(t *testing.T) {
func TestWaitForPosCancel(t *testing.T) {
dbClient := binlogplayer.NewMockDBClient(t)
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
@@ -474,7 +474,7 @@ func TestCreateDBAndTable(t *testing.T) {
resetBinlogClient()
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
// Test Insert
@@ -570,7 +570,7 @@ func TestGetDBClient(t *testing.T) {
dbClientFactoryDba := func() binlogplayer.DBClient { return dbClientDba }
dbClientFactoryFiltered := func() binlogplayer.DBClient { return dbClientFiltered }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
vre := NewTestEngine(env.TopoServ, env.Cells[0], mysqld, dbClientFactoryFiltered, dbClientFactoryDba, dbClientDba.DBName(), nil)
shouldBeDbaClient := vre.getDBClient(true /*runAsAdmin*/)
diff --git a/go/vt/vttablet/tabletmanager/vreplication/fuzz.go b/go/vt/vttablet/tabletmanager/vreplication/fuzz.go
index 55c1e743dad..0fcfcce9660 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/fuzz.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/fuzz.go
@@ -25,7 +25,7 @@ import (
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/topo/memorytopo"
fuzz "github.com/AdaLogics/go-fuzz-headers"
@@ -94,7 +94,7 @@ func FuzzEngine(data []byte) int {
resetBinlogClient()
dbClient := binlogplayer.NewMockDBClient(t)
dbClientFactory := func() binlogplayer.DBClient { return dbClient }
- mysqld := &fakemysqldaemon.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
+ mysqld := &mysqlctl.FakeMysqlDaemon{MysqlPort: sync2.NewAtomicInt32(3306)}
vre := NewTestEngine(topoServer, "cell1", mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil)
diff --git a/go/vt/vttablet/tabletmanager/vreplication/last_error.go b/go/vt/vttablet/tabletmanager/vreplication/last_error.go
index e3a73d5b062..5d51fc44f06 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/last_error.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/last_error.go
@@ -26,18 +26,20 @@ import (
/*
* lastError tracks the most recent error for any ongoing process and how long it has persisted.
- * The err field should be a vterror so as to ensure we have meaningful error codes, causes, stack
+ * The err field should be a vterror to ensure we have meaningful error codes, causes, stack
* traces, etc.
*/
type lastError struct {
name string
err error
firstSeen time.Time
+ lastSeen time.Time
mu sync.Mutex
maxTimeInError time.Duration // if error persists for this long, shouldRetry() will return false
}
func newLastError(name string, maxTimeInError time.Duration) *lastError {
+ log.Infof("Created last error: %s, with maxTimeInError: %s", name, maxTimeInError)
return &lastError{
name: name,
maxTimeInError: maxTimeInError,
@@ -48,15 +50,27 @@ func (le *lastError) record(err error) {
le.mu.Lock()
defer le.mu.Unlock()
if err == nil {
+ log.Infof("Resetting last error: %s", le.name)
le.err = nil
le.firstSeen = time.Time{}
+ le.lastSeen = time.Time{}
return
}
if !vterrors.Equals(err, le.err) {
+ log.Infof("Got new last error %+v for %s, was %+v", err, le.name, le.err)
le.firstSeen = time.Now()
+ le.lastSeen = time.Now()
le.err = err
+ } else {
+ // The same error occurred again; check whether enough time has passed to reset firstSeen.
+ log.Infof("Got the same last error for %q: %+v ; first seen at %s and last seen %dms ago", le.name, le.err, le.firstSeen, int(time.Since(le.lastSeen).Milliseconds()))
+ if time.Since(le.lastSeen) > le.maxTimeInError {
+ // reset firstSeen, since it has been long enough since the last time we saw this error
+ log.Infof("Resetting firstSeen for %s, since it is too long since the last one", le.name)
+ le.firstSeen = time.Now()
+ }
+ le.lastSeen = time.Now()
}
- // The error is unchanged so we don't need to do anything
}
func (le *lastError) shouldRetry() bool {
diff --git a/go/vt/vttablet/tabletmanager/vreplication/last_error_test.go b/go/vt/vttablet/tabletmanager/vreplication/last_error_test.go
index 8d0e353478a..08eaa67f3be 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/last_error_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/last_error_test.go
@@ -24,32 +24,63 @@ import (
"github.com/stretchr/testify/require"
)
-func TestLastError(t *testing.T) {
- le := newLastError("test", 100*time.Millisecond)
+const shortWait = 1 * time.Millisecond
+const longWait = 150 * time.Millisecond
+const maxTimeInError = 100 * time.Millisecond
- t.Run("long running error", func(t *testing.T) {
- err1 := fmt.Errorf("test1")
+// TestLastErrorZeroMaxTime tests that with maxTimeInError = 0 we should always retry
+func TestLastErrorZeroMaxTime(t *testing.T) {
+ le := newLastError("test", 0)
+ err1 := fmt.Errorf("error1")
+ le.record(err1)
+ require.True(t, le.shouldRetry())
+ time.Sleep(shortWait)
+ require.True(t, le.shouldRetry())
+ time.Sleep(longWait)
+ require.True(t, le.shouldRetry())
+}
+
+// TestLastErrorNoError ensures that an uninitialized lastError always retries
+func TestLastErrorNoError(t *testing.T) {
+ le := newLastError("test", maxTimeInError)
+ require.True(t, le.shouldRetry())
+ err1 := fmt.Errorf("error1")
+ le.record(err1)
+ require.True(t, le.shouldRetry())
+ le.record(nil)
+ require.True(t, le.shouldRetry())
+}
+
+// TestLastErrorOneError validates that we retry an error if happening within the maxTimeInError, but not after
+func TestLastErrorOneError(t *testing.T) {
+ le := newLastError("test", maxTimeInError)
+ err1 := fmt.Errorf("error1")
+ le.record(err1)
+ require.True(t, le.shouldRetry())
+ time.Sleep(shortWait)
+ require.True(t, le.shouldRetry())
+ time.Sleep(shortWait)
+ require.True(t, le.shouldRetry())
+ time.Sleep(longWait)
+ require.False(t, le.shouldRetry())
+}
+
+// TestLastErrorRepeatedError confirms that if the same error is repeated we don't retry
+// unless it recurs after maxTimeInError has elapsed
+func TestLastErrorRepeatedError(t *testing.T) {
+ le := newLastError("test", maxTimeInError)
+ err1 := fmt.Errorf("error1")
+ le.record(err1)
+ require.True(t, le.shouldRetry())
+ for i := 1; i < 10; i++ {
le.record(err1)
- require.True(t, le.shouldRetry())
- time.Sleep(150 * time.Millisecond)
- require.False(t, le.shouldRetry())
- })
-
- t.Run("new long running error", func(t *testing.T) {
- err2 := fmt.Errorf("test2")
- le.record(err2)
- require.True(t, le.shouldRetry())
- for i := 1; i < 10; i++ {
- le.record(err2)
- }
- require.True(t, le.shouldRetry())
- time.Sleep(150 * time.Millisecond)
- le.record(err2)
- require.False(t, le.shouldRetry())
- })
-
- t.Run("no error", func(t *testing.T) {
- le.record(nil)
- require.True(t, le.shouldRetry())
- })
+ time.Sleep(shortWait)
+ }
+ require.True(t, le.shouldRetry())
+
+ // Once the same error recurs after maxTimeInError, firstSeen is reset, so recording it again should allow retry
+ time.Sleep(longWait)
+ require.False(t, le.shouldRetry())
+ le.record(err1)
+ require.True(t, le.shouldRetry())
}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go
index 8b97f02dc1e..780b1c0d064 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go
@@ -414,6 +414,55 @@ func TestBuildPlayerPlan(t *testing.T) {
},
},
},
+ }, {
+ input: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select c1, convert(c using utf8mb4) as c2 from t1",
+ }},
+ },
+ plan: &TestReplicatorPlan{
+ VStreamFilter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select c1, convert(c using utf8mb4) as c2 from t1",
+ }},
+ },
+ TargetTables: []string{"t1"},
+ TablePlans: map[string]*TestTablePlan{
+ "t1": {
+ TargetName: "t1",
+ SendRule: "t1",
+ PKReferences: []string{"c1"},
+ InsertFront: "insert into t1(c1,c2)",
+ InsertValues: "(:a_c1,convert(:a_c using utf8mb4))",
+ Insert: "insert into t1(c1,c2) values (:a_c1,convert(:a_c using utf8mb4))",
+ Update: "update t1 set c2=convert(:a_c using utf8mb4) where c1=:b_c1",
+ Delete: "delete from t1 where c1=:b_c1",
+ },
+ },
+ },
+ planpk: &TestReplicatorPlan{
+ VStreamFilter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select c1, convert(c using utf8mb4) as c2, pk1, pk2 from t1",
+ }},
+ },
+ TargetTables: []string{"t1"},
+ TablePlans: map[string]*TestTablePlan{
+ "t1": {
+ TargetName: "t1",
+ SendRule: "t1",
+ PKReferences: []string{"c1", "pk1", "pk2"},
+ InsertFront: "insert into t1(c1,c2)",
+ InsertValues: "(:a_c1,convert(:a_c using utf8mb4))",
+ Insert: "insert into t1(c1,c2) select :a_c1, convert(:a_c using utf8mb4) from dual where (:a_pk1,:a_pk2) <= (1,'aaa')",
+ Update: "update t1 set c2=convert(:a_c using utf8mb4) where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')",
+ Delete: "delete from t1 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')",
+ },
+ },
+ },
}, {
// Keywords as names.
input: &binlogdatapb.Filter{
diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats.go b/go/vt/vttablet/tabletmanager/vreplication/stats.go
index c8c242bab05..727d32b9d8d 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/stats.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/stats.go
@@ -27,6 +27,8 @@ import (
"vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/servenv"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
var (
@@ -142,7 +144,10 @@ func (st *vrStats) register() {
defer st.mu.Unlock()
result := make(map[string]string, len(st.controllers))
for _, ct := range st.controllers {
- result[fmt.Sprintf("%v", ct.id)] = ct.sourceTablet.Get()
+ ta := ct.sourceTablet.Load()
+ if ta != nil {
+ result[fmt.Sprintf("%v", ct.id)] = ta.(*topodatapb.TabletAlias).String()
+ }
}
return result
}))
@@ -394,8 +399,7 @@ func (st *vrStats) status() *EngineStatus {
ReplicationLagSeconds: ct.blpStats.ReplicationLagSeconds.Get(),
Counts: ct.blpStats.Timings.Counts(),
Rates: ct.blpStats.Rates.Get(),
- State: ct.blpStats.State.Get(),
- SourceTablet: ct.sourceTablet.Get(),
+ SourceTablet: ct.sourceTablet.Load().(*topodatapb.TabletAlias),
Messages: ct.blpStats.MessageHistory(),
QueryCounts: ct.blpStats.QueryCount.Counts(),
PhaseTimings: ct.blpStats.PhaseTimings.Counts(),
@@ -427,7 +431,7 @@ type ControllerStatus struct {
Counts map[string]int64
Rates map[string][]float64
State string
- SourceTablet string
+ SourceTablet *topodatapb.TabletAlias
Messages []string
QueryCounts map[string]int64
PhaseTimings map[string]int64
diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go
index 2accc3cfa24..b63583e57ee 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go
@@ -28,6 +28,8 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/proto/binlogdata"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
var wantOut = `
@@ -107,8 +109,14 @@ func TestStatusHtml(t *testing.T) {
done: make(chan struct{}),
},
}
- testStats.controllers[1].sourceTablet.Set("src1")
- testStats.controllers[2].sourceTablet.Set("src2")
+ testStats.controllers[1].sourceTablet.Store(&topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 01,
+ })
+ testStats.controllers[2].sourceTablet.Store(&topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 02,
+ })
close(testStats.controllers[2].done)
tpl := template.Must(template.New("test").Parse(vreplicationTemplate))
@@ -135,7 +143,10 @@ func TestVReplicationStats(t *testing.T) {
done: make(chan struct{}),
},
}
- testStats.controllers[1].sourceTablet.Set("src1")
+ testStats.controllers[1].sourceTablet.Store(&topodatapb.TabletAlias{
+ Cell: "zone1",
+ Uid: 01,
+ })
sleepTime := 1 * time.Millisecond
record := func(phase string) {
diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go
index d4a74540d3d..2204e8f18a3 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go
@@ -418,9 +418,28 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr
references: make(map[string]bool),
}
if expr, ok := aliased.Expr.(*sqlparser.ConvertUsingExpr); ok {
+ // Here we find the actual column name in the convert, in case
+ // this is a column rename and the AS is the new column.
+ // For example, in convert(c1 using utf8mb4) as c2, we want to find
+ // c1, because c1 exists in the current table whereas c2 is the renamed column
+ // in the desired table.
+ var colName sqlparser.IdentifierCI
+ err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ switch node := node.(type) {
+ case *sqlparser.ColName:
+ if !node.Qualifier.IsEmpty() {
+ return false, fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(node))
+ }
+ colName = node.Name
+ }
+ return true, nil
+ }, aliased.Expr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find column name for convert using expression: %v, %v", sqlparser.String(aliased.Expr), err)
+ }
selExpr := &sqlparser.ConvertUsingExpr{
Type: "utf8mb4",
- Expr: &sqlparser.ColName{Name: as},
+ Expr: &sqlparser.ColName{Name: colName},
}
cexpr.expr = expr
cexpr.operation = opExpr
diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go
index 3b30b46d1f4..971e09c77cf 100644
--- a/go/vt/vttablet/tabletserver/connpool/dbconn.go
+++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go
@@ -275,6 +275,24 @@ func (dbc *DBConn) streamOnce(ctx context.Context, query string, callback func(*
return err
}
+// StreamOnce executes the query and streams the results. But, does not retry on connection errors.
+func (dbc *DBConn) StreamOnce(ctx context.Context, query string, callback func(*sqltypes.Result) error, alloc func() *sqltypes.Result, streamBufferSize int, includedFields querypb.ExecuteOptions_IncludedFields) error {
+ resultSent := false
+ return dbc.streamOnce(
+ ctx,
+ query,
+ func(r *sqltypes.Result) error {
+ if !resultSent {
+ resultSent = true
+ r = r.StripMetadata(includedFields)
+ }
+ return callback(r)
+ },
+ alloc,
+ streamBufferSize,
+ )
+}
+
var (
getModeSQL = "select @@global.sql_mode"
getAutocommit = "select @@autocommit"
diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go
index ea9701faf40..f8462505e8f 100644
--- a/go/vt/vttablet/tabletserver/health_streamer.go
+++ b/go/vt/vttablet/tabletserver/health_streamer.go
@@ -25,9 +25,8 @@ import (
"github.com/spf13/pflag"
- "vitess.io/vitess/go/vt/servenv"
-
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/sqlparser"
@@ -82,6 +81,8 @@ type healthStreamer struct {
cancel context.CancelFunc
clients map[chan *querypb.StreamHealthResponse]struct{}
state *querypb.StreamHealthResponse
+ // isServingPrimary stores if this tablet is currently the serving primary or not.
+ isServingPrimary bool
history *history.History
@@ -307,6 +308,21 @@ func (hs *healthStreamer) AppendDetails(details []*kv) []*kv {
return details
}
+// MakePrimary tells the healthstreamer whether the current tablet is now a serving primary,
+// so it knows whether it may read and write to the MySQL instance for schema-tracking.
+func (hs *healthStreamer) MakePrimary(serving bool) {
+ hs.mu.Lock()
+ defer hs.mu.Unlock()
+ hs.isServingPrimary = serving
+}
+
+// MakeNonPrimary tells the healthstreamer that the current tablet is now not a primary.
+func (hs *healthStreamer) MakeNonPrimary() {
+ hs.mu.Lock()
+ defer hs.mu.Unlock()
+ hs.isServingPrimary = false
+}
+
func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) {
hs.unhealthyThreshold.Set(v)
shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse)
@@ -325,8 +341,10 @@ func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) {
func (hs *healthStreamer) reload() error {
hs.mu.Lock()
defer hs.mu.Unlock()
- // Schema Reload to happen only on primary.
- if hs.state.Target.TabletType != topodatapb.TabletType_PRIMARY {
+ // Schema Reload to happen only on primary when it is serving.
+ // We can be in a state where the primary is not serving after we have run DemotePrimary. In that case,
+ // we don't want to run any queries in MySQL, so we shouldn't reload anything in the healthStreamer.
+ if !hs.isServingPrimary {
return nil
}
diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go
index f7043c72e3c..443dc4fafbf 100644
--- a/go/vt/vttablet/tabletserver/health_streamer_test.go
+++ b/go/vt/vttablet/tabletserver/health_streamer_test.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/fakesqldb"
@@ -56,6 +57,36 @@ func newConfig(db *fakesqldb.DB) *tabletenv.TabletConfig {
return cfg
}
+// TestNotServingPrimaryNoWrite makes sure that the health-streamer doesn't write anything to the database when
+// the state is not serving primary.
+func TestNotServingPrimaryNoWrite(t *testing.T) {
+ db := fakesqldb.New(t)
+ defer db.Close()
+ config := newConfig(db)
+ config.SignalWhenSchemaChange = true
+
+ env := tabletenv.NewEnv(config, "TestNotServingPrimary")
+ alias := &topodatapb.TabletAlias{
+ Cell: "cell",
+ Uid: 1,
+ }
+ // Create a new health streamer and set it to a serving primary state
+ hs := newHealthStreamer(env, alias)
+ hs.isServingPrimary = true
+ hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, config.DB.DbaWithDB())
+ hs.Open()
+ defer hs.Close()
+
+ // Let's say the tablet goes to a non-serving primary state.
+ hs.MakePrimary(false)
+
+ // A reload now should not write anything to the database. If any write happens it will error out since we have not
+ // added any query to the database to expect.
+ err := hs.reload()
+ require.NoError(t, err)
+ require.NoError(t, db.LastError())
+}
+
func TestHealthStreamerBroadcast(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
@@ -166,6 +197,7 @@ func TestReloadSchema(t *testing.T) {
}
blpFunc = testBlpFunc
hs := newHealthStreamer(env, alias)
+ hs.MakePrimary(true)
target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}
configs := config.DB
@@ -227,6 +259,7 @@ func TestDoesNotReloadSchema(t *testing.T) {
}
blpFunc = testBlpFunc
hs := newHealthStreamer(env, alias)
+ hs.MakePrimary(true)
target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}
configs := config.DB
@@ -279,6 +312,7 @@ func TestInitialReloadSchema(t *testing.T) {
}
blpFunc = testBlpFunc
hs := newHealthStreamer(env, alias)
+ hs.MakePrimary(true)
target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}
configs := config.DB
diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go
index 4de993dcfb8..41d6192dffb 100644
--- a/go/vt/vttablet/tabletserver/query_engine.go
+++ b/go/vt/vttablet/tabletserver/query_engine.go
@@ -171,7 +171,10 @@ type QueryEngine struct {
consolidatorMode sync2.AtomicString
// stats
- queryCounts, queryTimes, queryRowCounts, queryErrorCounts, queryRowsAffected, queryRowsReturned *stats.CountersWithMultiLabels
+ queryCounts, queryTimes, queryRowCounts, queryErrorCounts, queryRowsAffected, queryRowsReturned, queryTextCharsProcessed *stats.CountersWithMultiLabels
+
+ // stats flags
+ enablePerWorkloadTableMetrics bool
// Loggers
accessCheckerLogger *logutil.ThrottledLogger
@@ -189,11 +192,12 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine {
}
qe := &QueryEngine{
- env: env,
- se: se,
- tables: make(map[string]*schema.Table),
- plans: cache.NewDefaultCacheImpl(cacheCfg),
- queryRuleSources: rules.NewMap(),
+ env: env,
+ se: se,
+ tables: make(map[string]*schema.Table),
+ plans: cache.NewDefaultCacheImpl(cacheCfg),
+ queryRuleSources: rules.NewMap(),
+ enablePerWorkloadTableMetrics: config.EnablePerWorkloadTableMetrics,
}
qe.conns = connpool.NewPool(env, "ConnPool", config.OltpReadPool)
@@ -246,12 +250,19 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine {
env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", qe.plans.UsedCapacity)
env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", qe.plans.MaxCapacity)
env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", qe.plans.Evictions)
- qe.queryCounts = env.Exporter().NewCountersWithMultiLabels("QueryCounts", "query counts", []string{"Table", "Plan"})
- qe.queryTimes = env.Exporter().NewCountersWithMultiLabels("QueryTimesNs", "query times in ns", []string{"Table", "Plan"})
- qe.queryRowCounts = env.Exporter().NewCountersWithMultiLabels("QueryRowCounts", "(DEPRECATED - use QueryRowsAffected and QueryRowsReturned instead) query row counts", []string{"Table", "Plan"})
- qe.queryRowsAffected = env.Exporter().NewCountersWithMultiLabels("QueryRowsAffected", "query rows affected", []string{"Table", "Plan"})
- qe.queryRowsReturned = env.Exporter().NewCountersWithMultiLabels("QueryRowsReturned", "query rows returned", []string{"Table", "Plan"})
- qe.queryErrorCounts = env.Exporter().NewCountersWithMultiLabels("QueryErrorCounts", "query error counts", []string{"Table", "Plan"})
+
+ labels := []string{"Table", "Plan"}
+ if config.EnablePerWorkloadTableMetrics {
+ labels = []string{"Table", "Plan", "Workload"}
+ }
+
+ qe.queryCounts = env.Exporter().NewCountersWithMultiLabels("QueryCounts", "query counts", labels)
+ qe.queryTimes = env.Exporter().NewCountersWithMultiLabels("QueryTimesNs", "query times in ns", labels)
+ qe.queryRowCounts = env.Exporter().NewCountersWithMultiLabels("QueryRowCounts", "(DEPRECATED - use QueryRowsAffected and QueryRowsReturned instead) query row counts", labels)
+ qe.queryRowsAffected = env.Exporter().NewCountersWithMultiLabels("QueryRowsAffected", "query rows affected", labels)
+ qe.queryRowsReturned = env.Exporter().NewCountersWithMultiLabels("QueryRowsReturned", "query rows returned", labels)
+ qe.queryTextCharsProcessed = env.Exporter().NewCountersWithMultiLabels("QueryTextCharactersProcessed", "query text characters processed", labels)
+ qe.queryErrorCounts = env.Exporter().NewCountersWithMultiLabels("QueryErrorCounts", "query error counts", labels)
env.Exporter().HandleFunc("/debug/hotrows", qe.txSerializer.ServeHTTP)
env.Exporter().HandleFunc("/debug/tablet_plans", qe.handleHTTPQueryPlans)
@@ -479,18 +490,25 @@ func (qe *QueryEngine) QueryPlanCacheLen() int {
}
// AddStats adds the given stats for the planName.tableName
-func (qe *QueryEngine) AddStats(planType planbuilder.PlanType, tableName string, queryCount int64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount int64) {
+func (qe *QueryEngine) AddStats(plan *TabletPlan, tableName, workload string, queryCount int64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount int64) {
// table names can contain "." characters, replace them!
- keys := []string{tableName, planType.String()}
+ keys := []string{tableName, plan.PlanID.String()}
+ // Only use the workload as a label if that's enabled in the configuration.
+ if qe.enablePerWorkloadTableMetrics {
+ keys = append(keys, workload)
+ }
qe.queryCounts.Add(keys, queryCount)
qe.queryTimes.Add(keys, int64(duration))
qe.queryRowCounts.Add(keys, rowsAffected)
qe.queryErrorCounts.Add(keys, errorCount)
+ if plan.FullQuery != nil {
+ qe.queryTextCharsProcessed.Add(keys, int64(len(plan.FullQuery.Query)))
+ }
// For certain plan types like select, we only want to add their metrics to rows returned
// But there are special cases like `SELECT ... INTO OUTFILE ''` which return positive rows affected
// So we check if it is positive and add that too.
- switch planType {
+ switch plan.PlanID {
case planbuilder.PlanSelect, planbuilder.PlanSelectStream, planbuilder.PlanSelectImpossible, planbuilder.PlanShow, planbuilder.PlanOtherRead:
qe.queryRowsReturned.Add(keys, rowsReturned)
if rowsAffected > 0 {
diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go
index c03af9186b3..db280a6764f 100644
--- a/go/vt/vttablet/tabletserver/query_engine_test.go
+++ b/go/vt/vttablet/tabletserver/query_engine_test.go
@@ -573,83 +573,191 @@ func TestPlanCachePollution(t *testing.T) {
}
func TestAddQueryStats(t *testing.T) {
+ fakeSelectPlan := &TabletPlan{
+ Plan: &planbuilder.Plan{
+ PlanID: planbuilder.PlanSelect,
+ FullQuery: &sqlparser.ParsedQuery{Query: `select * from something where something=123`}, // 43 length
+ },
+ }
+ fakeInsertPlan := &TabletPlan{
+ Plan: &planbuilder.Plan{
+ PlanID: planbuilder.PlanInsert,
+ FullQuery: &sqlparser.ParsedQuery{Query: `insert into something (id, msg) values(123, 'hello world!')`}, // 59 length
+ },
+ }
testcases := []struct {
- name string
- planType planbuilder.PlanType
- tableName string
- queryCount int64
- duration time.Duration
- mysqlTime time.Duration
- rowsAffected int64
- rowsReturned int64
- errorCount int64
- expectedQueryCounts string
- expectedQueryTimes string
- expectedQueryRowsAffected string
- expectedQueryRowsReturned string
- expectedQueryRowCounts string
- expectedQueryErrorCounts string
+ name string
+ plan *TabletPlan
+ tableName string
+ queryCount int64
+ duration time.Duration
+ mysqlTime time.Duration
+ rowsAffected int64
+ rowsReturned int64
+ errorCount int64
+ errorCode string
+ enablePerWorkloadTableMetrics bool
+ workload string
+ expectedQueryCounts string
+ expectedQueryTimes string
+ expectedQueryRowsAffected string
+ expectedQueryRowsReturned string
+ expectedQueryTextCharsProcessed string
+ expectedQueryErrorCounts string
+ expectedQueryRowCounts string
}{
{
- name: "select query",
- planType: planbuilder.PlanSelect,
- tableName: "A",
- queryCount: 1,
- duration: 10,
- rowsAffected: 0,
- rowsReturned: 15,
- errorCount: 0,
- expectedQueryCounts: `{"A.Select": 1}`,
- expectedQueryTimes: `{"A.Select": 10}`,
- expectedQueryRowsAffected: `{}`,
- expectedQueryRowsReturned: `{"A.Select": 15}`,
- expectedQueryRowCounts: `{"A.Select": 0}`,
- expectedQueryErrorCounts: `{"A.Select": 0}`,
+ name: "select query",
+ plan: fakeSelectPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 0,
+ rowsReturned: 15,
+ errorCount: 0,
+ errorCode: "OK",
+ enablePerWorkloadTableMetrics: false,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Select": 1}`,
+ expectedQueryTimes: `{"A.Select": 10}`,
+ expectedQueryRowsAffected: `{}`,
+ expectedQueryRowsReturned: `{"A.Select": 15}`,
+ expectedQueryTextCharsProcessed: `{"A.Select": 43}`,
+ expectedQueryRowCounts: `{"A.Select": 0}`,
+ expectedQueryErrorCounts: `{"A.Select": 0}`,
+ }, {
+ name: "select into query",
+ plan: fakeSelectPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 15,
+ rowsReturned: 0,
+ errorCount: 0,
+ errorCode: "OK",
+ enablePerWorkloadTableMetrics: false,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Select": 1}`,
+ expectedQueryTimes: `{"A.Select": 10}`,
+ expectedQueryRowsAffected: `{"A.Select": 15}`,
+ expectedQueryRowsReturned: `{"A.Select": 0}`,
+ expectedQueryTextCharsProcessed: `{"A.Select": 43}`,
+ expectedQueryRowCounts: `{"A.Select": 15}`,
+ expectedQueryErrorCounts: `{"A.Select": 0}`,
+ }, {
+ name: "error",
+ plan: fakeSelectPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 0,
+ rowsReturned: 0,
+ errorCount: 1,
+ errorCode: "RESOURCE_EXHAUSTED",
+ enablePerWorkloadTableMetrics: false,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Select": 1}`,
+ expectedQueryTimes: `{"A.Select": 10}`,
+ expectedQueryRowsAffected: `{}`,
+ expectedQueryRowsReturned: `{"A.Select": 0}`,
+ expectedQueryTextCharsProcessed: `{"A.Select": 43}`,
+ expectedQueryRowCounts: `{"A.Select": 0}`,
+ expectedQueryErrorCounts: `{"A.Select": 1}`,
+ }, {
+ name: "insert query",
+ plan: fakeInsertPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 15,
+ rowsReturned: 0,
+ errorCount: 0,
+ errorCode: "OK",
+ enablePerWorkloadTableMetrics: false,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Insert": 1}`,
+ expectedQueryTimes: `{"A.Insert": 10}`,
+ expectedQueryRowsAffected: `{"A.Insert": 15}`,
+ expectedQueryRowsReturned: `{}`,
+ expectedQueryTextCharsProcessed: `{"A.Insert": 59}`,
+ expectedQueryRowCounts: `{"A.Insert": 15}`,
+ expectedQueryErrorCounts: `{"A.Insert": 0}`,
+ }, {
+ name: "select query with per workload metrics",
+ plan: fakeSelectPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 0,
+ rowsReturned: 15,
+ errorCount: 0,
+ errorCode: "OK",
+ enablePerWorkloadTableMetrics: true,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Select.some-workload": 1}`,
+ expectedQueryTimes: `{"A.Select.some-workload": 10}`,
+ expectedQueryRowsAffected: `{}`,
+ expectedQueryRowsReturned: `{"A.Select.some-workload": 15}`,
+ expectedQueryTextCharsProcessed: `{"A.Select.some-workload": 43}`,
+ expectedQueryRowCounts: `{"A.Select.some-workload": 0}`,
+ expectedQueryErrorCounts: `{"A.Select.some-workload": 0}`,
}, {
- name: "select into query",
- planType: planbuilder.PlanSelect,
- tableName: "A",
- queryCount: 1,
- duration: 10,
- rowsAffected: 15,
- rowsReturned: 0,
- errorCount: 0,
- expectedQueryCounts: `{"A.Select": 1}`,
- expectedQueryTimes: `{"A.Select": 10}`,
- expectedQueryRowsAffected: `{"A.Select": 15}`,
- expectedQueryRowsReturned: `{"A.Select": 0}`,
- expectedQueryRowCounts: `{"A.Select": 15}`,
- expectedQueryErrorCounts: `{"A.Select": 0}`,
+ name: "select into query with per workload metrics",
+ plan: fakeSelectPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 15,
+ rowsReturned: 0,
+ errorCount: 0,
+ errorCode: "OK",
+ enablePerWorkloadTableMetrics: true,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Select.some-workload": 1}`,
+ expectedQueryTimes: `{"A.Select.some-workload": 10}`,
+ expectedQueryRowsAffected: `{"A.Select.some-workload": 15}`,
+ expectedQueryRowsReturned: `{"A.Select.some-workload": 0}`,
+ expectedQueryTextCharsProcessed: `{"A.Select.some-workload": 43}`,
+ expectedQueryRowCounts: `{"A.Select.some-workload": 15}`,
+ expectedQueryErrorCounts: `{"A.Select.some-workload": 0}`,
}, {
- name: "error",
- planType: planbuilder.PlanSelect,
- tableName: "A",
- queryCount: 1,
- duration: 10,
- rowsAffected: 0,
- rowsReturned: 0,
- errorCount: 1,
- expectedQueryCounts: `{"A.Select": 1}`,
- expectedQueryTimes: `{"A.Select": 10}`,
- expectedQueryRowsAffected: `{}`,
- expectedQueryRowsReturned: `{"A.Select": 0}`,
- expectedQueryRowCounts: `{"A.Select": 0}`,
- expectedQueryErrorCounts: `{"A.Select": 1}`,
+ name: "error with per workload metrics",
+ plan: fakeSelectPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 0,
+ rowsReturned: 0,
+ errorCount: 1,
+ errorCode: "RESOURCE_EXHAUSTED",
+ enablePerWorkloadTableMetrics: true,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Select.some-workload": 1}`,
+ expectedQueryTimes: `{"A.Select.some-workload": 10}`,
+ expectedQueryRowsAffected: `{}`,
+ expectedQueryRowsReturned: `{"A.Select.some-workload": 0}`,
+ expectedQueryTextCharsProcessed: `{"A.Select.some-workload": 43}`,
+ expectedQueryRowCounts: `{"A.Select.some-workload": 0}`,
+ expectedQueryErrorCounts: `{"A.Select.some-workload": 1}`,
}, {
- name: "insert query",
- planType: planbuilder.PlanInsert,
- tableName: "A",
- queryCount: 1,
- duration: 10,
- rowsAffected: 15,
- rowsReturned: 0,
- errorCount: 0,
- expectedQueryCounts: `{"A.Insert": 1}`,
- expectedQueryTimes: `{"A.Insert": 10}`,
- expectedQueryRowsAffected: `{"A.Insert": 15}`,
- expectedQueryRowsReturned: `{}`,
- expectedQueryRowCounts: `{"A.Insert": 15}`,
- expectedQueryErrorCounts: `{"A.Insert": 0}`,
+ name: "insert query with per workload metrics",
+ plan: fakeInsertPlan,
+ tableName: "A",
+ queryCount: 1,
+ duration: 10,
+ rowsAffected: 15,
+ rowsReturned: 0,
+ errorCount: 0,
+ errorCode: "OK",
+ enablePerWorkloadTableMetrics: true,
+ workload: "some-workload",
+ expectedQueryCounts: `{"A.Insert.some-workload": 1}`,
+ expectedQueryTimes: `{"A.Insert.some-workload": 10}`,
+ expectedQueryRowsAffected: `{"A.Insert.some-workload": 15}`,
+ expectedQueryRowsReturned: `{}`,
+ expectedQueryTextCharsProcessed: `{"A.Insert.some-workload": 59}`,
+ expectedQueryRowCounts: `{"A.Insert.some-workload": 15}`,
+ expectedQueryErrorCounts: `{"A.Insert.some-workload": 0}`,
},
}
@@ -658,15 +766,17 @@ func TestAddQueryStats(t *testing.T) {
t.Run(testcase.name, func(t *testing.T) {
config := tabletenv.NewDefaultConfig()
config.DB = newDBConfigs(fakesqldb.New(t))
+ config.EnablePerWorkloadTableMetrics = testcase.enablePerWorkloadTableMetrics
env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name)
se := schema.NewEngine(env)
qe := NewQueryEngine(env, se)
- qe.AddStats(testcase.planType, testcase.tableName, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount)
+ qe.AddStats(testcase.plan, testcase.tableName, testcase.workload, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount)
assert.Equal(t, testcase.expectedQueryCounts, qe.queryCounts.String())
assert.Equal(t, testcase.expectedQueryTimes, qe.queryTimes.String())
assert.Equal(t, testcase.expectedQueryRowsAffected, qe.queryRowsAffected.String())
assert.Equal(t, testcase.expectedQueryRowsReturned, qe.queryRowsReturned.String())
assert.Equal(t, testcase.expectedQueryRowCounts, qe.queryRowCounts.String())
+ assert.Equal(t, testcase.expectedQueryTextCharsProcessed, qe.queryTextCharsProcessed.String())
assert.Equal(t, testcase.expectedQueryErrorCounts, qe.queryErrorCounts.String())
})
}
diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go
index 214fced7005..f1775833d6a 100644
--- a/go/vt/vttablet/tabletserver/query_executor.go
+++ b/go/vt/vttablet/tabletserver/query_executor.go
@@ -62,6 +62,7 @@ type QueryExecutor struct {
tsv *TabletServer
tabletType topodatapb.TabletType
setting *pools.Setting
+ workload string
}
const (
@@ -75,6 +76,8 @@ var streamResultPool = sync.Pool{New: func() any {
}
}}
+var errTxThrottled = vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction throttled")
+
func returnStreamResult(result *sqltypes.Result) error {
// only return large results slices to the pool
if cap(result.Rows) >= streamRowsSize {
@@ -118,11 +121,11 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) {
}
if reply == nil {
- qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, 1, duration, mysqlTime, 0, 0, 1)
+ qre.tsv.qe.AddStats(qre.plan, tableName, qre.options.GetWorkloadName(), 1, duration, mysqlTime, 0, 0, 1)
qre.plan.AddStats(1, duration, mysqlTime, 0, 0, 1)
return
}
- qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0)
+ qre.tsv.qe.AddStats(qre.plan, tableName, qre.options.GetWorkloadName(), 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0)
qre.plan.AddStats(1, duration, mysqlTime, reply.RowsAffected, uint64(len(reply.Rows)), 0)
qre.logStats.RowsAffected = int(reply.RowsAffected)
qre.logStats.Rows = reply.Rows
@@ -202,6 +205,10 @@ func (qre *QueryExecutor) execAutocommit(f func(conn *StatefulConnection) (*sqlt
}
qre.options.TransactionIsolation = querypb.ExecuteOptions_AUTOCOMMIT
+ if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options), qre.options.GetWorkloadName()) {
+ return nil, errTxThrottled
+ }
+
conn, _, _, err := qre.tsv.te.txPool.Begin(qre.ctx, qre.options, false, 0, nil, qre.setting)
if err != nil {
@@ -213,6 +220,9 @@ func (qre *QueryExecutor) execAutocommit(f func(conn *StatefulConnection) (*sqlt
}
func (qre *QueryExecutor) execAsTransaction(f func(conn *StatefulConnection) (*sqltypes.Result, error)) (*sqltypes.Result, error) {
+ if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options), qre.options.GetWorkloadName()) {
+ return nil, errTxThrottled
+ }
conn, beginSQL, _, err := qre.tsv.te.txPool.Begin(qre.ctx, qre.options, false, 0, nil, qre.setting)
if err != nil {
return nil, err
@@ -303,7 +313,7 @@ func (qre *QueryExecutor) Stream(callback StreamCallback) error {
}
var replaceKeyspace string
- if sqltypes.IncludeFieldsOrDefault(qre.options) == querypb.ExecuteOptions_ALL {
+ if sqltypes.IncludeFieldsOrDefault(qre.options) == querypb.ExecuteOptions_ALL && qre.tsv.sm.target.Keyspace != qre.tsv.config.DB.DBName {
replaceKeyspace = qre.tsv.sm.target.Keyspace
}
@@ -1006,29 +1016,23 @@ func (qre *QueryExecutor) execStreamSQL(conn *connpool.DBConn, isTransaction boo
return callback(result)
}
- qd := NewQueryDetail(qre.logStats.Ctx, conn)
+ start := time.Now()
+ defer qre.logStats.AddRewrittenSQL(sql, start)
// Add query detail object into QueryExecutor TableServer list w.r.t if it is a transactional or not. Previously we were adding it
// to olapql list regardless but that resulted in problems, where long-running stream queries which can be stateful (or transactional)
// weren't getting cleaned up during unserveCommon>handleShutdownGracePeriod in state_manager.go.
// This change will ensure that long-running streaming stateful queries get gracefully shutdown during ServingTypeChange
// once their grace period is over.
+ qd := NewQueryDetail(qre.logStats.Ctx, conn)
if isTransaction {
qre.tsv.statefulql.Add(qd)
defer qre.tsv.statefulql.Remove(qd)
- } else {
- qre.tsv.olapql.Add(qd)
- defer qre.tsv.olapql.Remove(qd)
+ return conn.StreamOnce(ctx, sql, callBackClosingSpan, allocStreamResult, int(qre.tsv.qe.streamBufferSize.Get()), sqltypes.IncludeFieldsOrDefault(qre.options))
}
-
- start := time.Now()
- err := conn.Stream(ctx, sql, callBackClosingSpan, allocStreamResult, int(qre.tsv.qe.streamBufferSize.Get()), sqltypes.IncludeFieldsOrDefault(qre.options))
- qre.logStats.AddRewrittenSQL(sql, start)
- if err != nil {
- // MySQL error that isn't due to a connection issue
- return err
- }
- return nil
+ qre.tsv.olapql.Add(qd)
+ defer qre.tsv.olapql.Remove(qd)
+ return conn.Stream(ctx, sql, callBackClosingSpan, allocStreamResult, int(qre.tsv.qe.streamBufferSize.Get()), sqltypes.IncludeFieldsOrDefault(qre.options))
}
func (qre *QueryExecutor) recordUserQuery(queryType string, duration int64) {
diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go
index 526419c4f49..934d933ecac 100644
--- a/go/vt/vttablet/tabletserver/query_executor_test.go
+++ b/go/vt/vttablet/tabletserver/query_executor_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package tabletserver
import (
+ "context"
"fmt"
"io"
"math/rand"
@@ -24,10 +25,6 @@ import (
"strings"
"testing"
- "vitess.io/vitess/go/vt/vttablet/tabletserver/tx"
-
- "context"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -44,6 +41,8 @@ import (
"vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder"
"vitess.io/vitess/go/vt/vttablet/tabletserver/rules"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/tx"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler"
querypb "vitess.io/vitess/go/vt/proto/query"
tableaclpb "vitess.io/vitess/go/vt/proto/tableacl"
@@ -81,6 +80,10 @@ func TestQueryExecutorPlans(t *testing.T) {
// inTxWant is the query log we expect if we're in a transation.
// If empty, then we should expect the same as logWant.
inTxWant string
+ // errorWant is the error we expect to get, if any, and should be nil if no error should be returned
+ errorWant error
+ // TxThrottler allows the test case to override the transaction throttler
+ txThrottler txthrottler.TxThrottler
}{{
input: "select * from t",
dbResponses: []dbResponse{{
@@ -267,7 +270,25 @@ func TestQueryExecutorPlans(t *testing.T) {
resultWant: emptyResult,
planWant: "Show",
logWant: "show create table mysql.`user`",
- }}
+ }, {
+ input: "update test_table set a=1",
+ dbResponses: []dbResponse{{
+ query: "update test_table set a = 1 limit 10001",
+ result: dmlResult,
+ }},
+ errorWant: errTxThrottled,
+ txThrottler: &mockTxThrottler{true},
+ }, {
+ input: "update test_table set a=1",
+ passThrough: true,
+ dbResponses: []dbResponse{{
+ query: "update test_table set a = 1 limit 10001",
+ result: dmlResult,
+ }},
+ errorWant: errTxThrottled,
+ txThrottler: &mockTxThrottler{true},
+ },
+ }
for _, tcase := range testcases {
t.Run(tcase.input, func(t *testing.T) {
db := setUpQueryExecutorTest(t)
@@ -277,6 +298,9 @@ func TestQueryExecutorPlans(t *testing.T) {
}
ctx := context.Background()
tsv := newTestTabletServer(ctx, noFlags, db)
+ if tcase.txThrottler != nil {
+ tsv.txThrottler = tcase.txThrottler
+ }
tsv.config.DB.DBName = "ks"
defer tsv.StopService()
@@ -285,32 +309,39 @@ func TestQueryExecutorPlans(t *testing.T) {
// Test outside a transaction.
qre := newTestQueryExecutor(ctx, tsv, tcase.input, 0)
got, err := qre.Execute()
- require.NoError(t, err, tcase.input)
- assert.Equal(t, tcase.resultWant, got, tcase.input)
- assert.Equal(t, tcase.planWant, qre.logStats.PlanType, tcase.input)
- assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input)
-
+ if tcase.errorWant == nil {
+ require.NoError(t, err, tcase.input)
+ assert.Equal(t, tcase.resultWant, got, tcase.input)
+ assert.Equal(t, tcase.planWant, qre.logStats.PlanType, tcase.input)
+ assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input)
+ } else {
+ assert.True(t, vterrors.Equals(err, tcase.errorWant))
+ }
// Wait for the existing query to be processed by the cache
tsv.QueryPlanCacheWait()
// Test inside a transaction.
target := tsv.sm.Target()
state, err := tsv.Begin(ctx, target, nil)
- require.NoError(t, err)
- require.NotNil(t, state.TabletAlias, "alias should not be nil")
- assert.Equal(t, tsv.alias, state.TabletAlias, "Wrong alias returned by Begin")
- defer tsv.Commit(ctx, target, state.TransactionID)
-
- qre = newTestQueryExecutor(ctx, tsv, tcase.input, state.TransactionID)
- got, err = qre.Execute()
- require.NoError(t, err, tcase.input)
- assert.Equal(t, tcase.resultWant, got, "in tx: %v", tcase.input)
- assert.Equal(t, tcase.planWant, qre.logStats.PlanType, "in tx: %v", tcase.input)
- want := tcase.logWant
- if tcase.inTxWant != "" {
- want = tcase.inTxWant
+ if tcase.errorWant == nil {
+ require.NoError(t, err)
+ require.NotNil(t, state.TabletAlias, "alias should not be nil")
+ assert.Equal(t, tsv.alias, state.TabletAlias, "Wrong alias returned by Begin")
+ defer tsv.Commit(ctx, target, state.TransactionID)
+
+ qre = newTestQueryExecutor(ctx, tsv, tcase.input, state.TransactionID)
+ got, err = qre.Execute()
+ require.NoError(t, err, tcase.input)
+ assert.Equal(t, tcase.resultWant, got, "in tx: %v", tcase.input)
+ assert.Equal(t, tcase.planWant, qre.logStats.PlanType, "in tx: %v", tcase.input)
+ want := tcase.logWant
+ if tcase.inTxWant != "" {
+ want = tcase.inTxWant
+ }
+ assert.Equal(t, want, qre.logStats.RewrittenSQL(), "in tx: %v", tcase.input)
+ } else {
+ assert.True(t, vterrors.Equals(err, tcase.errorWant))
}
- assert.Equal(t, want, qre.logStats.RewrittenSQL(), "in tx: %v", tcase.input)
})
}
}
@@ -1540,3 +1571,22 @@ func addQueryExecutorSupportedQueries(db *fakesqldb.DB) {
}},
})
}
+
+type mockTxThrottler struct {
+ throttle bool
+}
+
+func (m mockTxThrottler) InitDBConfig(target *querypb.Target) {
+ panic("implement me")
+}
+
+func (m mockTxThrottler) Open() (err error) {
+ return nil
+}
+
+func (m mockTxThrottler) Close() {
+}
+
+func (m mockTxThrottler) Throttle(priority int, workload string) (result bool) {
+ return m.throttle
+}
diff --git a/go/vt/vttablet/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go
index dfad235e721..e78199c50ad 100644
--- a/go/vt/vttablet/tabletserver/query_list.go
+++ b/go/vt/vttablet/tabletserver/query_list.go
@@ -17,13 +17,12 @@ limitations under the License.
package tabletserver
import (
+ "context"
"html/template"
"sort"
"sync"
"time"
- "context"
-
"vitess.io/vitess/go/streamlog"
"vitess.io/vitess/go/vt/callinfo"
"vitess.io/vitess/go/vt/sqlparser"
@@ -52,15 +51,18 @@ func NewQueryDetail(ctx context.Context, conn killable) *QueryDetail {
type QueryList struct {
name string
- mu sync.Mutex
- queryDetails map[int64]*QueryDetail
+ mu sync.Mutex
+ // on reconnect connection id will get reused by a different connection.
+ // so have to maintain a list to compare with the actual connection.
+ // and remove appropriately.
+ queryDetails map[int64][]*QueryDetail
}
// NewQueryList creates a new QueryList
func NewQueryList(name string) *QueryList {
return &QueryList{
name: name,
- queryDetails: make(map[int64]*QueryDetail),
+ queryDetails: make(map[int64][]*QueryDetail),
}
}
@@ -68,25 +70,46 @@ func NewQueryList(name string) *QueryList {
func (ql *QueryList) Add(qd *QueryDetail) {
ql.mu.Lock()
defer ql.mu.Unlock()
- ql.queryDetails[qd.connID] = qd
+ qds, exists := ql.queryDetails[qd.connID]
+ if exists {
+ ql.queryDetails[qd.connID] = append(qds, qd)
+ } else {
+ ql.queryDetails[qd.connID] = []*QueryDetail{qd}
+ }
}
// Remove removes a QueryDetail from QueryList
func (ql *QueryList) Remove(qd *QueryDetail) {
ql.mu.Lock()
defer ql.mu.Unlock()
- delete(ql.queryDetails, qd.connID)
+ qds, exists := ql.queryDetails[qd.connID]
+ if !exists {
+ return
+ }
+ if len(qds) == 1 {
+ delete(ql.queryDetails, qd.connID)
+ return
+ }
+ for i, q := range qds {
+ // match with the actual connection ID.
+ if q.conn.ID() == qd.conn.ID() {
+ ql.queryDetails[qd.connID] = append(qds[:i], qds[i+1:]...)
+ return
+ }
+ }
}
// Terminate updates the query status and kills the connection
func (ql *QueryList) Terminate(connID int64) bool {
ql.mu.Lock()
defer ql.mu.Unlock()
- qd := ql.queryDetails[connID]
- if qd == nil {
+ qds, exists := ql.queryDetails[connID]
+ if !exists {
return false
}
- qd.conn.Kill("QueryList.Terminate()", time.Since(qd.start))
+ for _, qd := range qds {
+ _ = qd.conn.Kill("QueryList.Terminate()", time.Since(qd.start))
+ }
return true
}
@@ -94,8 +117,10 @@ func (ql *QueryList) Terminate(connID int64) bool {
func (ql *QueryList) TerminateAll() {
ql.mu.Lock()
defer ql.mu.Unlock()
- for _, qd := range ql.queryDetails {
- qd.conn.Kill("QueryList.TerminateAll()", time.Since(qd.start))
+ for _, qds := range ql.queryDetails {
+ for _, qd := range qds {
+ _ = qd.conn.Kill("QueryList.TerminateAll()", time.Since(qd.start))
+ }
}
}
@@ -120,20 +145,22 @@ func (a byStartTime) Less(i, j int) bool { return a[i].Start.Before(a[j].Start)
// AppendQueryzRows returns a list of QueryDetailzRow sorted by start time
func (ql *QueryList) AppendQueryzRows(rows []QueryDetailzRow) []QueryDetailzRow {
ql.mu.Lock()
- for _, qd := range ql.queryDetails {
- query := qd.conn.Current()
- if streamlog.GetRedactDebugUIQueries() {
- query, _ = sqlparser.RedactSQLQuery(query)
- }
- row := QueryDetailzRow{
- Type: ql.name,
- Query: query,
- ContextHTML: callinfo.HTMLFromContext(qd.ctx),
- Start: qd.start,
- Duration: time.Since(qd.start),
- ConnID: qd.connID,
+ for _, qds := range ql.queryDetails {
+ for _, qd := range qds {
+ query := qd.conn.Current()
+ if streamlog.GetRedactDebugUIQueries() {
+ query, _ = sqlparser.RedactSQLQuery(query)
+ }
+ row := QueryDetailzRow{
+ Type: ql.name,
+ Query: query,
+ ContextHTML: callinfo.HTMLFromContext(qd.ctx),
+ Start: qd.start,
+ Duration: time.Since(qd.start),
+ ConnID: qd.connID,
+ }
+ rows = append(rows, row)
}
- rows = append(rows, row)
}
ql.mu.Unlock()
sort.Sort(byStartTime(rows))
diff --git a/go/vt/vttablet/tabletserver/query_list_test.go b/go/vt/vttablet/tabletserver/query_list_test.go
index 40c546ef8ca..02b24d86cda 100644
--- a/go/vt/vttablet/tabletserver/query_list_test.go
+++ b/go/vt/vttablet/tabletserver/query_list_test.go
@@ -17,10 +17,11 @@ limitations under the License.
package tabletserver
import (
+ "context"
"testing"
"time"
- "context"
+ "github.com/stretchr/testify/require"
)
type testConn struct {
@@ -48,7 +49,7 @@ func TestQueryList(t *testing.T) {
qd := NewQueryDetail(context.Background(), &testConn{id: connID})
ql.Add(qd)
- if qd1, ok := ql.queryDetails[connID]; !ok || qd1.connID != connID {
+ if qd1, ok := ql.queryDetails[connID]; !ok || qd1[0].connID != connID {
t.Errorf("failed to add to QueryList")
}
@@ -66,3 +67,26 @@ func TestQueryList(t *testing.T) {
t.Errorf("failed to remove from QueryList")
}
}
+
+func TestQueryListChangeConnIDInMiddle(t *testing.T) {
+ ql := NewQueryList("test")
+ connID := int64(1)
+ qd1 := NewQueryDetail(context.Background(), &testConn{id: connID})
+ ql.Add(qd1)
+
+ conn := &testConn{id: connID}
+ qd2 := NewQueryDetail(context.Background(), conn)
+ ql.Add(qd2)
+
+ require.Len(t, ql.queryDetails[1], 2)
+
+ // change the connID in the middle
+ conn.id = 2
+
+ // remove the same object.
+ ql.Remove(qd2)
+
+ require.Len(t, ql.queryDetails[1], 1)
+ require.Equal(t, qd1, ql.queryDetails[1][0])
+ require.NotEqual(t, qd2, ql.queryDetails[1][0])
+}
diff --git a/go/vt/vttablet/tabletserver/repltracker/poller_test.go b/go/vt/vttablet/tabletserver/repltracker/poller_test.go
index 3dc27c771ca..e0734118160 100644
--- a/go/vt/vttablet/tabletserver/repltracker/poller_test.go
+++ b/go/vt/vttablet/tabletserver/repltracker/poller_test.go
@@ -23,12 +23,12 @@ import (
"github.com/stretchr/testify/assert"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
)
func TestPoller(t *testing.T) {
poller := &poller{}
- mysqld := fakemysqldaemon.NewFakeMysqlDaemon(nil)
+ mysqld := mysqlctl.NewFakeMysqlDaemon(nil)
poller.InitDBConfig(mysqld)
mysqld.ReplicationStatusError = errors.New("err")
diff --git a/go/vt/vttablet/tabletserver/repltracker/reader.go b/go/vt/vttablet/tabletserver/repltracker/reader.go
index e58565c9147..f9bd07fceb3 100644
--- a/go/vt/vttablet/tabletserver/repltracker/reader.go
+++ b/go/vt/vttablet/tabletserver/repltracker/reader.go
@@ -122,6 +122,9 @@ func (r *heartbeatReader) Close() {
}
r.ticks.Stop()
r.pool.Close()
+
+ currentLagNs.Set(0)
+
r.isOpen = false
log.Info("Heartbeat Reader: closed")
}
diff --git a/go/vt/vttablet/tabletserver/repltracker/reader_test.go b/go/vt/vttablet/tabletserver/repltracker/reader_test.go
index c4a8be6e692..87c5853f603 100644
--- a/go/vt/vttablet/tabletserver/repltracker/reader_test.go
+++ b/go/vt/vttablet/tabletserver/repltracker/reader_test.go
@@ -39,9 +39,13 @@ import (
func TestReaderReadHeartbeat(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
- tr := newReader(db, mockNowFunc)
+
+ now := time.Now()
+ tr := newReader(db, &now)
defer tr.Close()
+ tr.pool.Open(tr.env.Config().DB.AppWithDB(), tr.env.Config().DB.DbaWithDB(), tr.env.Config().DB.AppDebugWithDB())
+
db.AddQuery(fmt.Sprintf("SELECT ts FROM %s.heartbeat WHERE keyspaceShard='%s'", "_vt", tr.keyspaceShard), &sqltypes.Result{
Fields: []*querypb.Field{
{Name: "ts", Type: sqltypes.Int64},
@@ -79,14 +83,46 @@ func TestReaderReadHeartbeat(t *testing.T) {
utils.MustMatch(t, expectedHisto, heartbeatLagNsHistogram.Counts(), "wrong counts in histogram")
}
+// TestReaderCloseSetsCurrentLagToZero tests that when closing the heartbeat reader, the current lag is
+// set to zero.
+func TestReaderCloseSetsCurrentLagToZero(t *testing.T) {
+ db := fakesqldb.New(t)
+ defer db.Close()
+ tr := newReader(db, nil)
+
+ db.AddQuery(fmt.Sprintf("SELECT ts FROM %s.heartbeat WHERE keyspaceShard='%s'", "_vt", tr.keyspaceShard), &sqltypes.Result{
+ Fields: []*querypb.Field{
+ {Name: "ts", Type: sqltypes.Int64},
+ },
+ Rows: [][]sqltypes.Value{{
+ sqltypes.NewInt64(time.Now().Add(-10 * time.Second).UnixNano()),
+ }},
+ })
+
+ currentLagNs.Reset()
+
+ tr.Open()
+ time.Sleep(2 * time.Second)
+
+ assert.Greater(t, currentLagNs.Get(), int64(0), "lag should be greater than zero")
+
+ tr.Close()
+
+ assert.Equal(t, int64(0), currentLagNs.Get(), "lag should be zero after closing the reader.")
+}
+
// TestReaderReadHeartbeatError tests that we properly account for errors
// encountered in the reading of heartbeat.
func TestReaderReadHeartbeatError(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
- tr := newReader(db, mockNowFunc)
+
+ now := time.Now()
+ tr := newReader(db, &now)
defer tr.Close()
+ tr.pool.Open(tr.env.Config().DB.AppWithDB(), tr.env.Config().DB.DbaWithDB(), tr.env.Config().DB.AppDebugWithDB())
+
cumulativeLagNs.Reset()
readErrors.Reset()
@@ -100,18 +136,23 @@ func TestReaderReadHeartbeatError(t *testing.T) {
assert.Equal(t, int64(1), readErrors.Get(), "wrong read error count")
}
-func newReader(db *fakesqldb.DB, nowFunc func() time.Time) *heartbeatReader {
+func newReader(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatReader {
config := tabletenv.NewDefaultConfig()
config.ReplicationTracker.Mode = tabletenv.Heartbeat
config.ReplicationTracker.HeartbeatIntervalSeconds = 1
params, _ := db.ConnParams().MysqlParams()
cp := *params
dbc := dbconfigs.NewTestDBConfigs(cp, cp, "")
+ config.DB = dbc
tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest"))
tr.keyspaceShard = "test:0"
- tr.now = nowFunc
- tr.pool.Open(dbc.AppWithDB(), dbc.DbaWithDB(), dbc.AppDebugWithDB())
+
+ if frozenTime != nil {
+ tr.now = func() time.Time {
+ return *frozenTime
+ }
+ }
return tr
}
diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker.go b/go/vt/vttablet/tabletserver/repltracker/repltracker.go
index db20c2e821c..906b0179eb7 100644
--- a/go/vt/vttablet/tabletserver/repltracker/repltracker.go
+++ b/go/vt/vttablet/tabletserver/repltracker/repltracker.go
@@ -17,6 +17,8 @@ limitations under the License.
package repltracker
import (
+ "errors"
+ "fmt"
"sync"
"time"
@@ -47,6 +49,8 @@ var (
heartbeatLagNsHistogram = stats.NewGenericHistogram("HeartbeatLagNsHistogram",
"Histogram of lag values in nanoseconds", []int64{0, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12},
[]string{"0", "1ms", "10ms", "100ms", "1s", "10s", "100s", "1000s", ">1000s"}, "Count", "Total")
+
+ errFallback = errors.New("failed to obtain replication lag from poller after attempting to use it as fall-back for heartbeat")
)
// ReplTracker tracks replication lag.
@@ -133,14 +137,29 @@ func (rt *ReplTracker) Status() (time.Duration, error) {
rt.mu.Lock()
defer rt.mu.Unlock()
+ fallbackToPoller := false
+ var heartbeatLag, mysqlLag time.Duration
+ var heartbeatErr, mysqlErr error
+
switch {
case rt.isPrimary || rt.mode == tabletenv.Disable:
return 0, nil
case rt.mode == tabletenv.Heartbeat:
- return rt.hr.Status()
+ // This should allow us to migrate safely to using vttablet heartbeat. If using heartbeat fails (e.g. because
+ // the shard's primary does not yet have them and therefore, either the heartbeat table is missing or it's
+ // empty), fall back to the poller. Otherwise, use what the heartbeat says.
+ if heartbeatLag, heartbeatErr = rt.hr.Status(); heartbeatErr == nil {
+ return heartbeatLag, heartbeatErr
+ }
+ fallbackToPoller = true
}
- // rt.mode == tabletenv.Poller
- return rt.poller.Status()
+ // rt.mode == tabletenv.Poller or fallback after heartbeat error
+ mysqlLag, mysqlErr = rt.poller.Status()
+ if fallbackToPoller && mysqlErr != nil {
+ return 0, fmt.Errorf("%w: %s", errFallback, mysqlErr)
+ }
+
+ return mysqlLag, mysqlErr
}
// EnableHeartbeat enables or disables writes of heartbeat. This functionality
diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go
index 362148cd3b2..33fe1a39146 100644
--- a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go
+++ b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go
@@ -25,7 +25,7 @@ import (
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/vt/dbconfigs"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
@@ -47,7 +47,7 @@ func TestReplTracker(t *testing.T) {
Uid: 1,
}
target := &querypb.Target{}
- mysqld := fakemysqldaemon.NewFakeMysqlDaemon(nil)
+ mysqld := mysqlctl.NewFakeMysqlDaemon(nil)
rt := NewReplTracker(env, alias)
rt.InitDBConfig(target, mysqld)
@@ -95,3 +95,76 @@ func TestReplTracker(t *testing.T) {
_, err = rt.Status()
assert.Equal(t, "err", err.Error())
}
+
+func TestStatusHeartbeatFallBack(t *testing.T) {
+ t.Parallel()
+
+ heartbeatErr := errors.New("some error reading heartbeat")
+ mysqlErr := errors.New("some mysql error")
+ testCases := []struct {
+ name string
+ heartbeatLag tabletenv.Seconds
+ heartbeatError error
+ mysqldLag uint
+ mysqldErr error
+ expectedError error
+ expectedLag time.Duration
+ }{
+ {
+ name: "Heartbeat successful",
+ heartbeatLag: tabletenv.Seconds(5.0),
+ heartbeatError: nil,
+ expectedLag: 5 * time.Second,
+ },
+ {
+ name: "Heartbeat failed, mysqld lag successful",
+ heartbeatError: heartbeatErr,
+ mysqldLag: 8,
+ expectedLag: 8 * time.Second,
+ },
+ {
+ name: "Heartbeat & mysqld lag failed",
+ heartbeatError: heartbeatErr,
+ mysqldErr: mysqlErr,
+ expectedError: errFallback,
+ },
+ }
+
+ for _, testCase := range testCases {
+ theCase := testCase
+
+ t.Run(theCase.name, func(t *testing.T) {
+ t.Parallel()
+ config := tabletenv.NewDefaultConfig()
+ config.ReplicationTracker.Mode = tabletenv.Heartbeat
+ config.ReplicationTracker.HeartbeatIntervalSeconds = theCase.heartbeatLag
+ env := tabletenv.NewEnv(config, "ReplTrackerTest")
+ alias := &topodatapb.TabletAlias{
+ Cell: "cell",
+ Uid: 1,
+ }
+ mysqld := mysqlctl.NewFakeMysqlDaemon(nil)
+ mysqld.ReplicationLagSeconds = theCase.mysqldLag
+ mysqld.Replicating = true
+ mysqld.ReplicationStatusError = theCase.mysqldErr
+ target := &querypb.Target{}
+
+ rt := NewReplTracker(env, alias)
+
+ rt.hr.lastKnownLag = time.Duration(theCase.heartbeatLag) * time.Second
+ rt.hr.lastKnownError = theCase.heartbeatError
+ rt.InitDBConfig(target, mysqld)
+
+ lag, err := rt.Status()
+
+ if theCase.expectedError == nil {
+ assert.NoError(t, err)
+ assert.Equal(t, theCase.expectedLag, lag)
+ } else {
+ assert.ErrorIs(t, err, theCase.expectedError)
+ }
+
+ })
+ }
+
+}
diff --git a/go/vt/vttablet/tabletserver/repltracker/writer_test.go b/go/vt/vttablet/tabletserver/repltracker/writer_test.go
index f678381ec2b..1c7b926f28b 100644
--- a/go/vt/vttablet/tabletserver/repltracker/writer_test.go
+++ b/go/vt/vttablet/tabletserver/repltracker/writer_test.go
@@ -32,17 +32,11 @@ import (
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
)
-var (
- now = time.Now()
- mockNowFunc = func() time.Time {
- return now
- }
-)
-
func TestCreateSchema(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
- tw := newTestWriter(db, mockNowFunc)
+ now := time.Now()
+ tw := newTestWriter(db, &now)
defer tw.Close()
writes.Reset()
@@ -66,7 +60,8 @@ func TestWriteHeartbeat(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
- tw := newTestWriter(db, mockNowFunc)
+ now := time.Now()
+ tw := newTestWriter(db, &now)
upsert := fmt.Sprintf("INSERT INTO %s.heartbeat (ts, tabletUid, keyspaceShard) VALUES (%d, %d, '%s') ON DUPLICATE KEY UPDATE ts=VALUES(ts), tabletUid=VALUES(tabletUid)",
"_vt", now.UnixNano(), tw.tabletAlias.Uid, tw.keyspaceShard)
db.AddQuery(upsert, &sqltypes.Result{})
@@ -83,7 +78,8 @@ func TestWriteHeartbeatError(t *testing.T) {
db := fakesqldb.New(t)
defer db.Close()
- tw := newTestWriter(db, mockNowFunc)
+ now := time.Now()
+ tw := newTestWriter(db, &now)
writes.Reset()
writeErrors.Reset()
@@ -93,7 +89,7 @@ func TestWriteHeartbeatError(t *testing.T) {
assert.Equal(t, int64(1), writeErrors.Get())
}
-func newTestWriter(db *fakesqldb.DB, nowFunc func() time.Time) *heartbeatWriter {
+func newTestWriter(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatWriter {
config := tabletenv.NewDefaultConfig()
config.ReplicationTracker.Mode = tabletenv.Heartbeat
config.ReplicationTracker.HeartbeatIntervalSeconds = 1
@@ -104,7 +100,13 @@ func newTestWriter(db *fakesqldb.DB, nowFunc func() time.Time) *heartbeatWriter
tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest"), &topodatapb.TabletAlias{Cell: "test", Uid: 1111})
tw.keyspaceShard = "test:0"
- tw.now = nowFunc
+
+ if frozenTime != nil {
+ tw.now = func() time.Time {
+ return *frozenTime
+ }
+ }
+
tw.appPool.Open(dbc.AppWithDB())
tw.allPrivsPool.Open(dbc.AllPrivsWithDB())
diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go
index 972f3183204..a23dbfe1277 100644
--- a/go/vt/vttablet/tabletserver/schema/engine.go
+++ b/go/vt/vttablet/tabletserver/schema/engine.go
@@ -18,27 +18,27 @@ package schema
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"net/http"
+ "strings"
"sync"
"time"
- "vitess.io/vitess/go/stats"
- "vitess.io/vitess/go/vt/dbconnpool"
- "vitess.io/vitess/go/vt/schema"
- "vitess.io/vitess/go/vt/vtgate/evalengine"
-
- "context"
-
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/stats"
"vitess.io/vitess/go/timer"
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/dbconnpool"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/mysqlctl/tmutils"
+ "vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vttablet/tabletserver/connpool"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
@@ -63,7 +63,7 @@ type Engine struct {
tables map[string]*Table
lastChange int64
reloadTime time.Duration
- //the position at which the schema was last loaded. it is only used in conjunction with ReloadAt
+ // the position at which the schema was last loaded. it is only used in conjunction with ReloadAt
reloadAtPos mysql.Position
notifierMu sync.Mutex
notifiers map[string]notifier
@@ -378,9 +378,15 @@ func (se *Engine) reload(ctx context.Context) error {
}
log.V(2).Infof("Reading schema for table: %s", tableName)
+ tableType := row[1].String()
table, err := LoadTable(conn, se.cp.DBName(), tableName, row[3].ToString())
if err != nil {
- rec.RecordError(err)
+ if isView := strings.Contains(tableType, tmutils.TableView); isView {
+ log.Warningf("Failed reading schema for the view: %s, error: %v", tableName, err)
+ continue
+ }
+ // Non-recoverable error:
+ rec.RecordError(vterrors.Wrapf(err, "in Engine.reload(), reading table %s", tableName))
continue
}
table.FileSize = fileSize
@@ -587,6 +593,32 @@ func (se *Engine) GetSchema() map[string]*Table {
return tables
}
+// MarshalMinimalSchema returns a protobuf encoded binlogdata.MinimalSchema
+func (se *Engine) MarshalMinimalSchema() ([]byte, error) {
+ se.mu.Lock()
+ defer se.mu.Unlock()
+ dbSchema := &binlogdatapb.MinimalSchema{
+ Tables: make([]*binlogdatapb.MinimalTable, 0, len(se.tables)),
+ }
+ for _, table := range se.tables {
+ dbSchema.Tables = append(dbSchema.Tables, newMinimalTable(table))
+ }
+ return dbSchema.MarshalVT()
+}
+
+func newMinimalTable(st *Table) *binlogdatapb.MinimalTable {
+ table := &binlogdatapb.MinimalTable{
+ Name: st.Name.String(),
+ Fields: st.Fields,
+ }
+ pkc := make([]int64, len(st.PKColumns))
+ for i, pk := range st.PKColumns {
+ pkc[i] = int64(pk)
+ }
+ table.PKColumns = pkc
+ return table
+}
+
// GetConnection returns a connection from the pool
func (se *Engine) GetConnection(ctx context.Context) (*connpool.DBConn, error) {
return se.conns.Get(ctx, nil)
diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go
index c38a63e2481..79f3bf5d3b7 100644
--- a/go/vt/vttablet/tabletserver/schema/engine_test.go
+++ b/go/vt/vttablet/tabletserver/schema/engine_test.go
@@ -18,6 +18,7 @@ package schema
import (
"context"
+ "errors"
"expvar"
"fmt"
"net/http"
@@ -27,20 +28,19 @@ import (
"testing"
"time"
- "vitess.io/vitess/go/test/utils"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/event/syslogger"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/dbconfigs"
+ querypb "vitess.io/vitess/go/vt/proto/query"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vttablet/tabletserver/schema/schematest"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
-
- querypb "vitess.io/vitess/go/vt/proto/query"
)
const baseShowTablesPattern = `SELECT t\.table_name.*`
@@ -201,7 +201,7 @@ func TestOpenAndReload(t *testing.T) {
assert.Equal(t, int64(0), se.tableAllocatedSizeGauge.Counts()["msg"])
assert.Equal(t, int64(0), se.tableFileSizeGauge.Counts()["msg"])
- //ReloadAt tests
+ // ReloadAt tests
pos1, err := mysql.DecodePosition("MariaDB/0-41983-20")
require.NoError(t, err)
pos2, err := mysql.DecodePosition("MariaDB/0-41983-40")
@@ -423,7 +423,10 @@ func TestOpenFailedDueToExecErr(t *testing.T) {
}
}
-func TestOpenFailedDueToTableErr(t *testing.T) {
+// TestOpenFailedDueToLoadTableErr tests that a failure to load a view is only logged, while a failure to load a table still makes the schema engine load fail.
+func TestOpenFailedDueToLoadTableErr(t *testing.T) {
+ tl := syslogger.NewTestLogger()
+ defer tl.Close()
db := fakesqldb.New(t)
defer db.Close()
schematest.AddDefaultQueries(db)
@@ -431,27 +434,64 @@ func TestOpenFailedDueToTableErr(t *testing.T) {
Fields: mysql.BaseShowTablesFields,
Rows: [][]sqltypes.Value{
mysql.BaseShowTablesRow("test_table", false, ""),
+ mysql.BaseShowTablesRow("test_view", true, "VIEW"),
},
})
- db.MockQueriesForTable("test_table", &sqltypes.Result{
- // this will cause NewTable error, as it expects zero rows.
- Fields: []*querypb.Field{
- {
- Type: querypb.Type_VARCHAR,
- },
- },
+ // this will cause NewTable error, as it expects zero rows.
+ db.MockQueriesForTable("test_table", sqltypes.MakeTestResult(sqltypes.MakeTestFields("foo", "varchar"), ""))
+
+ // adding column query for test_view
+ db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "test_view"),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), ""))
+ // rejecting the impossible query
+ db.AddRejectedQuery("SELECT * FROM `fakesqldb`.`test_view` WHERE 1 != 1", mysql.NewSQLErrorFromError(errors.New("The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)")))
+
+ AddFakeInnoDBReadRowsResult(db, 0)
+ se := newEngine(10, 1*time.Second, 1*time.Second, db)
+ err := se.Open()
+ // failed load should return an error because of test_table
+ assert.ErrorContains(t, err, "Row count exceeded")
+
+ logs := tl.GetAllLogs()
+ logOutput := strings.Join(logs, ":::")
+ assert.Contains(t, logOutput, "WARNING:Failed reading schema for the view: test_view")
+ assert.Contains(t, logOutput, "The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)")
+}
+
+// TestOpenNoErrorDueToInvalidViews tests that schema engine load does not fail when views are invalid; instead it logs the failures for those views.
+func TestOpenNoErrorDueToInvalidViews(t *testing.T) {
+ tl := syslogger.NewTestLogger()
+ defer tl.Close()
+ db := fakesqldb.New(t)
+ defer db.Close()
+ schematest.AddDefaultQueries(db)
+ db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{
+ Fields: mysql.BaseShowTablesFields,
Rows: [][]sqltypes.Value{
- {sqltypes.NewVarBinary("")},
+ mysql.BaseShowTablesRow("foo_view", true, "VIEW"),
+ mysql.BaseShowTablesRow("bar_view", true, "VIEW"),
},
})
+ // adding column queries for foo_view and bar_view
+ db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "foo_view"),
+ &sqltypes.Result{})
+ db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "bar_view"),
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), "col1", "col2"))
+ // rejecting the impossible query
+ db.AddRejectedQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`bar_view` WHERE 1 != 1", mysql.NewSQLError(mysql.ERWrongFieldWithGroup, mysql.SSClientError, "random error for table bar_view"))
+
AddFakeInnoDBReadRowsResult(db, 0)
se := newEngine(10, 1*time.Second, 1*time.Second, db)
err := se.Open()
- want := "Row count exceeded"
- if err == nil || !strings.Contains(err.Error(), want) {
- t.Errorf("se.Open: %v, want %s", err, want)
- }
+ require.NoError(t, err)
+
+ logs := tl.GetAllLogs()
+ logOutput := strings.Join(logs, ":::")
+ assert.Contains(t, logOutput, "WARNING:Failed reading schema for the view: foo_view")
+ assert.Contains(t, logOutput, "unable to get columns for table fakesqldb.foo_view")
+ assert.Contains(t, logOutput, "WARNING:Failed reading schema for the view: bar_view")
+ assert.Contains(t, logOutput, "random error for table bar_view")
}
func TestExportVars(t *testing.T) {
diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go
index 51204e4db3f..3bfa19a36d0 100644
--- a/go/vt/vttablet/tabletserver/schema/tracker.go
+++ b/go/vt/vttablet/tabletserver/schema/tracker.go
@@ -23,8 +23,6 @@ import (
"sync"
"time"
- "google.golang.org/protobuf/proto"
-
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/mysql"
@@ -240,14 +238,10 @@ func (tr *Tracker) schemaUpdated(gtid string, ddl string, timestamp int64) error
}
func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, timestamp int64) error {
- tables := tr.engine.GetSchema()
- dbSchema := &binlogdatapb.MinimalSchema{
- Tables: []*binlogdatapb.MinimalTable{},
- }
- for _, table := range tables {
- dbSchema.Tables = append(dbSchema.Tables, newMinimalTable(table))
+ blob, err := tr.engine.MarshalMinimalSchema()
+ if err != nil {
+ return err
}
- blob, _ := proto.Marshal(dbSchema)
conn, err := tr.engine.GetConnection(ctx)
if err != nil {
@@ -265,19 +259,6 @@ func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string,
return nil
}
-func newMinimalTable(st *Table) *binlogdatapb.MinimalTable {
- table := &binlogdatapb.MinimalTable{
- Name: st.Name.String(),
- Fields: st.Fields,
- }
- var pkc []int64
- for _, pk := range st.PKColumns {
- pkc = append(pkc, int64(pk))
- }
- table.PKColumns = pkc
- return table
-}
-
func encodeString(in string) string {
buf := bytes.NewBuffer(nil)
sqltypes.NewVarChar(in).EncodeSQL(buf)
diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go
index 341bc12d1fd..f8c2e6931e6 100644
--- a/go/vt/vttablet/tabletserver/state_manager.go
+++ b/go/vt/vttablet/tabletserver/state_manager.go
@@ -38,10 +38,14 @@ type servingState int64
const (
// StateNotConnected is the state where tabletserver is not
- // connected to an underlying mysql instance.
+ // connected to an underlying mysql instance. In this state we close
+ // query engine since MySQL is probably unavailable
StateNotConnected = servingState(iota)
// StateNotServing is the state where tabletserver is connected
// to an underlying mysql instance, but is not serving queries.
+ // We do not close the query engine to not close the pool. We keep
+ // the query engine open but prevent queries from running by blocking them
+ // in StartRequest.
StateNotServing
// StateServing is where queries are allowed.
StateServing
@@ -325,11 +329,25 @@ func (sm *stateManager) CheckMySQL() {
}
defer sm.transitioning.Release()
+ // This is required to prevent new queries from running in StartRequest
+ // unless they are part of a running transaction.
+ sm.setWantState(StateNotConnected)
sm.closeAll()
+
+ // Now that we reached the NotConnected state, we want to go back to the
+ // Serving state. The retry will only succeed once MySQL is reachable again
+ // Until then EnsureConnectionAndDB will error out.
+ sm.setWantState(StateServing)
sm.retryTransition(fmt.Sprintf("Cannot connect to MySQL, shutting down query service: %v", err))
}()
}
+func (sm *stateManager) setWantState(stateWanted servingState) {
+ sm.mu.Lock()
+ defer sm.mu.Unlock()
+ sm.wantState = stateWanted
+}
+
// StopService shuts down sm. If the shutdown doesn't complete
// within timeBombDuration, it crashes the process.
func (sm *stateManager) StopService() {
@@ -410,6 +428,7 @@ func (sm *stateManager) servePrimary() error {
return err
}
+ sm.hs.MakePrimary(true)
sm.rt.MakePrimary()
sm.tracker.Open()
// We instantly kill all stateful queries to allow for
@@ -434,6 +453,7 @@ func (sm *stateManager) unservePrimary() error {
return err
}
+ sm.hs.MakePrimary(false)
sm.rt.MakePrimary()
sm.setState(topodatapb.TabletType_PRIMARY, StateNotServing)
return nil
@@ -450,6 +470,7 @@ func (sm *stateManager) serveNonPrimary(wantTabletType topodatapb.TabletType) er
sm.messager.Close()
sm.tracker.Close()
sm.se.MakeNonPrimary()
+ sm.hs.MakeNonPrimary()
if err := sm.connect(wantTabletType); err != nil {
return err
@@ -467,6 +488,7 @@ func (sm *stateManager) unserveNonPrimary(wantTabletType topodatapb.TabletType)
sm.unserveCommon()
sm.se.MakeNonPrimary()
+ sm.hs.MakeNonPrimary()
if err := sm.connect(wantTabletType); err != nil {
return err
diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go
index 4953a3affe4..bb809d5b142 100644
--- a/go/vt/vttablet/tabletserver/state_manager_test.go
+++ b/go/vt/vttablet/tabletserver/state_manager_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package tabletserver
import (
+ "context"
"errors"
"sync"
"testing"
@@ -26,8 +27,6 @@ import (
"vitess.io/vitess/go/mysql/fakesqldb"
- "context"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -457,9 +456,16 @@ func TestStateManagerCheckMySQL(t *testing.T) {
err := sm.SetServingType(topodatapb.TabletType_PRIMARY, testNow, StateServing, "")
require.NoError(t, err)
+ sm.te = &delayedTxEngine{}
sm.qe.(*testQueryEngine).failMySQL = true
order.Set(0)
sm.CheckMySQL()
+ // We know checkMySQL will take at least 50 milliseconds since txEngine.Close has a sleep in the test code
+ time.Sleep(10 * time.Millisecond)
+ // this asserts that checkMySQL is running
+ assert.EqualValues(t, 0, sm.checkMySQLThrottler.Size())
+ // When we are in CheckMySQL state, we should not be accepting any new requests which aren't transactional
+ assert.False(t, sm.IsServing())
// Rechecking immediately should be a no-op:
sm.CheckMySQL()
@@ -491,6 +497,7 @@ func TestStateManagerCheckMySQL(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
+ assert.True(t, sm.IsServing())
assert.Equal(t, topodatapb.TabletType_PRIMARY, sm.Target().TabletType)
assert.Equal(t, StateServing, sm.State())
}
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go
index e8bb869392f..984f5b44f9c 100644
--- a/go/vt/vttablet/tabletserver/tabletenv/config.go
+++ b/go/vt/vttablet/tabletserver/tabletenv/config.go
@@ -23,18 +23,23 @@ import (
"time"
"github.com/spf13/pflag"
-
- "vitess.io/vitess/go/flagutil"
- "vitess.io/vitess/go/vt/servenv"
-
"google.golang.org/protobuf/encoding/prototext"
"vitess.io/vitess/go/cache"
+ "vitess.io/vitess/go/flagutil"
"vitess.io/vitess/go/streamlog"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/log"
- querypb "vitess.io/vitess/go/vt/proto/query"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/throttler"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
// These constants represent values for various config parameters.
@@ -85,6 +90,24 @@ var (
txLogHandler = "/debug/txlog"
)
+type TxThrottlerConfigFlag struct {
+ *throttlerdatapb.Configuration
+}
+
+func NewTxThrottlerConfigFlag() *TxThrottlerConfigFlag {
+ return &TxThrottlerConfigFlag{&throttlerdatapb.Configuration{}}
+}
+
+func (t *TxThrottlerConfigFlag) Get() *throttlerdatapb.Configuration {
+ return t.Configuration
+}
+
+func (t *TxThrottlerConfigFlag) Set(arg string) error {
+ return prototext.Unmarshal([]byte(arg), t.Configuration)
+}
+
+func (t *TxThrottlerConfigFlag) Type() string { return "string" }
+
// RegisterTabletEnvFlags is a public API to register tabletenv flags for use by test cases that expect
// some flags to be set with default values
func RegisterTabletEnvFlags(fs *pflag.FlagSet) {
@@ -96,13 +119,16 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
fs.StringVar(&txLogHandler, "transaction-log-stream-handler", txLogHandler, "URL handler for streaming transactions log")
fs.IntVar(¤tConfig.OltpReadPool.Size, "queryserver-config-pool-size", defaultConfig.OltpReadPool.Size, "query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction)")
- fs.IntVar(¤tConfig.OltpReadPool.PrefillParallelism, "queryserver-config-pool-prefill-parallelism", defaultConfig.OltpReadPool.PrefillParallelism, "(DEPRECATED) query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.")
+ fs.IntVar(¤tConfig.OltpReadPool.PrefillParallelism, "queryserver-config-pool-prefill-parallelism", defaultConfig.OltpReadPool.PrefillParallelism, "Query server read pool prefill parallelism, a non-zero value will prefill the pool using the specified parallism.")
+ _ = fs.MarkDeprecated("queryserver-config-pool-prefill-parallelism", "it will be removed in a future release.")
fs.IntVar(¤tConfig.OlapReadPool.Size, "queryserver-config-stream-pool-size", defaultConfig.OlapReadPool.Size, "query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion")
- fs.IntVar(¤tConfig.OlapReadPool.PrefillParallelism, "queryserver-config-stream-pool-prefill-parallelism", defaultConfig.OlapReadPool.PrefillParallelism, "(DEPRECATED) query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism")
+ fs.IntVar(¤tConfig.OlapReadPool.PrefillParallelism, "queryserver-config-stream-pool-prefill-parallelism", defaultConfig.OlapReadPool.PrefillParallelism, "Query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism")
+ _ = fs.MarkDeprecated("queryserver-config-stream-pool-prefill-parallelism", "it will be removed in a future release.")
fs.IntVar(¤tConfig.TxPool.Size, "queryserver-config-transaction-cap", defaultConfig.TxPool.Size, "query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout)")
- fs.IntVar(¤tConfig.TxPool.PrefillParallelism, "queryserver-config-transaction-prefill-parallelism", defaultConfig.TxPool.PrefillParallelism, "(DEPRECATED) query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.")
+ fs.IntVar(¤tConfig.TxPool.PrefillParallelism, "queryserver-config-transaction-prefill-parallelism", defaultConfig.TxPool.PrefillParallelism, "Query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.")
+ _ = fs.MarkDeprecated("queryserver-config-transaction-prefill-parallelism", "it will be removed in a future release.")
fs.IntVar(¤tConfig.MessagePostponeParallelism, "queryserver-config-message-postpone-cap", defaultConfig.MessagePostponeParallelism, "query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem.")
- SecondsVar(fs, ¤tConfig.Oltp.TxTimeoutSeconds, "queryserver-config-transaction-timeout", defaultConfig.Oltp.TxTimeoutSeconds, "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.Oltp.TxTimeoutSeconds), "queryserver-config-transaction-timeout", "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value")
SecondsVar(fs, ¤tConfig.GracePeriods.ShutdownSeconds, "shutdown_grace_period", defaultConfig.GracePeriods.ShutdownSeconds, "how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.")
fs.IntVar(¤tConfig.Oltp.MaxRows, "queryserver-config-max-result-size", defaultConfig.Oltp.MaxRows, "query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries.")
fs.IntVar(¤tConfig.Oltp.WarnRows, "queryserver-config-warn-result-size", defaultConfig.Oltp.WarnRows, "query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this")
@@ -112,15 +138,15 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
fs.IntVar(¤tConfig.QueryCacheSize, "queryserver-config-query-cache-size", defaultConfig.QueryCacheSize, "query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.")
fs.Int64Var(¤tConfig.QueryCacheMemory, "queryserver-config-query-cache-memory", defaultConfig.QueryCacheMemory, "query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.")
fs.BoolVar(¤tConfig.QueryCacheLFU, "queryserver-config-query-cache-lfu", defaultConfig.QueryCacheLFU, "query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries")
- SecondsVar(fs, ¤tConfig.SchemaReloadIntervalSeconds, "queryserver-config-schema-reload-time", defaultConfig.SchemaReloadIntervalSeconds, "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.")
- SecondsVar(fs, ¤tConfig.SignalSchemaChangeReloadIntervalSeconds, "queryserver-config-schema-change-signal-interval", defaultConfig.SignalSchemaChangeReloadIntervalSeconds, "query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate.")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.SchemaReloadIntervalSeconds), "queryserver-config-schema-reload-time", "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.SignalSchemaChangeReloadIntervalSeconds), "queryserver-config-schema-change-signal-interval", "query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate.")
fs.BoolVar(¤tConfig.SignalWhenSchemaChange, "queryserver-config-schema-change-signal", defaultConfig.SignalWhenSchemaChange, "query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work")
- SecondsVar(fs, ¤tConfig.Olap.TxTimeoutSeconds, "queryserver-config-olap-transaction-timeout", defaultConfig.Olap.TxTimeoutSeconds, "query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed")
- SecondsVar(fs, ¤tConfig.Oltp.QueryTimeoutSeconds, "queryserver-config-query-timeout", defaultConfig.Oltp.QueryTimeoutSeconds, "query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.")
- SecondsVar(fs, ¤tConfig.OltpReadPool.TimeoutSeconds, "queryserver-config-query-pool-timeout", defaultConfig.OltpReadPool.TimeoutSeconds, "query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.")
- SecondsVar(fs, ¤tConfig.OlapReadPool.TimeoutSeconds, "queryserver-config-stream-pool-timeout", defaultConfig.OlapReadPool.TimeoutSeconds, "query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.")
- SecondsVar(fs, ¤tConfig.TxPool.TimeoutSeconds, "queryserver-config-txpool-timeout", defaultConfig.TxPool.TimeoutSeconds, "query server transaction pool timeout, it is how long vttablet waits if tx pool is full")
- SecondsVar(fs, ¤tConfig.OltpReadPool.IdleTimeoutSeconds, "queryserver-config-idle-timeout", defaultConfig.OltpReadPool.IdleTimeoutSeconds, "query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.Olap.TxTimeoutSeconds), "queryserver-config-olap-transaction-timeout", "query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.Oltp.QueryTimeoutSeconds), "queryserver-config-query-timeout", "query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.OltpReadPool.TimeoutSeconds), "queryserver-config-query-pool-timeout", "query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.OlapReadPool.TimeoutSeconds), "queryserver-config-stream-pool-timeout", "query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.TxPool.TimeoutSeconds), "queryserver-config-txpool-timeout", "query server transaction pool timeout, it is how long vttablet waits if tx pool is full")
+ fs.Var((*flagutil.DurationOrSecondsFloatFlag)(¤tConfig.OltpReadPool.IdleTimeoutSeconds), "queryserver-config-idle-timeout", "query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.")
fs.IntVar(¤tConfig.OltpReadPool.MaxWaiters, "queryserver-config-query-pool-waiter-cap", defaultConfig.OltpReadPool.MaxWaiters, "query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection")
fs.IntVar(¤tConfig.OlapReadPool.MaxWaiters, "queryserver-config-stream-pool-waiter-cap", defaultConfig.OlapReadPool.MaxWaiters, "query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection")
fs.IntVar(¤tConfig.TxPool.MaxWaiters, "queryserver-config-txpool-waiter-cap", defaultConfig.TxPool.MaxWaiters, "query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection")
@@ -135,9 +161,14 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
fs.BoolVar(&currentConfig.TwoPCEnable, "twopc_enable", defaultConfig.TwoPCEnable, "if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.")
fs.StringVar(&currentConfig.TwoPCCoordinatorAddress, "twopc_coordinator_address", defaultConfig.TwoPCCoordinatorAddress, "address of the (VTGate) process(es) that will be used to notify of abandoned transactions.")
SecondsVar(fs, &currentConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, "time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.")
+ // Tx throttler config
flagutil.DualFormatBoolVar(fs, &currentConfig.EnableTxThrottler, "enable_tx_throttler", defaultConfig.EnableTxThrottler, "If true replication-lag-based throttling on transactions will be enabled.")
- flagutil.DualFormatStringVar(fs, &currentConfig.TxThrottlerConfig, "tx_throttler_config", defaultConfig.TxThrottlerConfig, "The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message")
+ flagutil.DualFormatVar(fs, currentConfig.TxThrottlerConfig, "tx_throttler_config", "The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message.")
flagutil.DualFormatStringListVar(fs, &currentConfig.TxThrottlerHealthCheckCells, "tx_throttler_healthcheck_cells", defaultConfig.TxThrottlerHealthCheckCells, "A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.")
+ fs.IntVar(&currentConfig.TxThrottlerDefaultPriority, "tx-throttler-default-priority", defaultConfig.TxThrottlerDefaultPriority, "Default priority assigned to queries that lack priority information")
+ fs.Var(currentConfig.TxThrottlerTabletTypes, "tx-throttler-tablet-types", "A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly.")
+ fs.BoolVar(&currentConfig.TxThrottlerDryRun, "tx-throttler-dry-run", defaultConfig.TxThrottlerDryRun, "If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests.")
+ fs.DurationVar(&currentConfig.TxThrottlerTopoRefreshInterval, "tx-throttler-topo-refresh-interval", time.Minute*5, "The rate that the transaction throttler will refresh the topology to find cells.")
fs.BoolVar(&enableHotRowProtection, "enable_hot_row_protection", false, "If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.")
fs.BoolVar(&enableHotRowProtectionDryRun, "enable_hot_row_protection_dry_run", false, "If true, hot row protection is not enforced but logs if transactions would have been queued.")
@@ -163,7 +194,9 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
flagutil.DualFormatBoolVar(fs, &enableConsolidatorReplicas, "enable_consolidator_replicas", false, "This option enables the query consolidator only on replicas.")
fs.Int64Var(&currentConfig.ConsolidatorStreamQuerySize, "consolidator-stream-query-size", defaultConfig.ConsolidatorStreamQuerySize, "Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator.")
fs.Int64Var(&currentConfig.ConsolidatorStreamTotalSize, "consolidator-stream-total-size", defaultConfig.ConsolidatorStreamTotalSize, "Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator.")
- flagutil.DualFormatBoolVar(fs, &currentConfig.DeprecatedCacheResultFields, "enable_query_plan_field_caching", defaultConfig.DeprecatedCacheResultFields, "(DEPRECATED) This option fetches & caches fields (columns) when storing query plans")
+ flagutil.DualFormatBoolVar(fs, &currentConfig.DeprecatedCacheResultFields, "enable_query_plan_field_caching", defaultConfig.DeprecatedCacheResultFields, "This option fetches & caches fields (columns) when storing query plans")
+ _ = fs.MarkDeprecated("enable_query_plan_field_caching", "it will be removed in a future release.")
+ _ = fs.MarkDeprecated("enable-query-plan-field-caching", "it will be removed in a future release.")
fs.DurationVar(&healthCheckInterval, "health_check_interval", 20*time.Second, "Interval between health checks")
fs.DurationVar(&degradedThreshold, "degraded_threshold", 30*time.Second, "replication lag after which a replica is considered degraded")
@@ -177,6 +210,8 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
fs.Int64Var(&currentConfig.RowStreamer.MaxInnoDBTrxHistLen, "vreplication_copy_phase_max_innodb_history_list_length", 1000000, "The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet.")
fs.Int64Var(&currentConfig.RowStreamer.MaxMySQLReplLagSecs, "vreplication_copy_phase_max_mysql_replication_lag", 43200, "The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet.")
+
+ fs.BoolVar(&currentConfig.EnablePerWorkloadTableMetrics, "enable-per-workload-table-metrics", defaultConfig.EnablePerWorkloadTableMetrics, "If true, query counts and query error metrics include a label that identifies the workload")
}
var (
@@ -302,9 +337,13 @@ type TabletConfig struct {
TwoPCCoordinatorAddress string `json:"-"`
TwoPCAbandonAge Seconds `json:"-"`
- EnableTxThrottler bool `json:"-"`
- TxThrottlerConfig string `json:"-"`
- TxThrottlerHealthCheckCells []string `json:"-"`
+ EnableTxThrottler bool `json:"-"`
+ TxThrottlerConfig *TxThrottlerConfigFlag `json:"-"`
+ TxThrottlerHealthCheckCells []string `json:"-"`
+ TxThrottlerDefaultPriority int `json:"-"`
+ TxThrottlerTabletTypes *topoproto.TabletTypeListFlag `json:"-"`
+ TxThrottlerTopoRefreshInterval time.Duration `json:"-"`
+ TxThrottlerDryRun bool `json:"-"`
EnableLagThrottler bool `json:"-"`
@@ -315,6 +354,8 @@ type TabletConfig struct {
EnableSettingsPool bool `json:"-"`
RowStreamer RowStreamerConfig `json:"rowStreamer,omitempty"`
+
+ EnablePerWorkloadTableMetrics bool `json:"-"`
}
// ConnPoolConfig contains the config for a conn pool.
@@ -438,6 +479,9 @@ func (c *TabletConfig) Verify() error {
if err := c.verifyTransactionLimitConfig(); err != nil {
return err
}
+ if err := c.verifyTxThrottlerConfig(); err != nil {
+ return err
+ }
if v := c.HotRowProtection.MaxQueueSize; v <= 0 {
return fmt.Errorf("-hot_row_protection_max_queue_size must be > 0 (specified value: %v)", v)
}
@@ -483,6 +527,36 @@ func (c *TabletConfig) verifyTransactionLimitConfig() error {
return nil
}
+// verifyTxThrottlerConfig checks the TxThrottler related config for sanity.
+func (c *TabletConfig) verifyTxThrottlerConfig() error {
+ if !c.EnableTxThrottler {
+ return nil
+ }
+
+ err := throttler.MaxReplicationLagModuleConfig{Configuration: c.TxThrottlerConfig.Get()}.Verify()
+ if err != nil {
+ return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "failed to parse throttlerdatapb.Configuration config: %v", err)
+ }
+
+ if v := c.TxThrottlerDefaultPriority; v > sqlparser.MaxPriorityValue || v < 0 {
+ return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "--tx-throttler-default-priority must be > 0 and < 100 (specified value: %d)", v)
+ }
+
+ if c.TxThrottlerTabletTypes == nil || len(*c.TxThrottlerTabletTypes) == 0 {
+ return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "--tx-throttler-tablet-types must be defined when transaction throttler is enabled")
+ }
+ for _, tabletType := range *c.TxThrottlerTabletTypes {
+ switch tabletType {
+ case topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY:
+ continue
+ default:
+ return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported tablet type %q", tabletType)
+ }
+ }
+
+ return nil
+}
+
// Some of these values are for documentation purposes.
// They actually get overwritten during Init.
var defaultConfig = TabletConfig{
@@ -546,9 +620,13 @@ var defaultConfig = TabletConfig{
DeprecatedCacheResultFields: true,
SignalWhenSchemaChange: true,
- EnableTxThrottler: false,
- TxThrottlerConfig: defaultTxThrottlerConfig(),
- TxThrottlerHealthCheckCells: []string{},
+ EnableTxThrottler: false,
+ TxThrottlerConfig: defaultTxThrottlerConfig(),
+ TxThrottlerHealthCheckCells: []string{},
+ TxThrottlerDefaultPriority: sqlparser.MaxPriorityValue, // This leads to all queries being candidates to throttle
+ TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA},
+ TxThrottlerDryRun: false,
+ TxThrottlerTopoRefreshInterval: time.Minute * 5,
EnableLagThrottler: false, // Feature flag; to switch to 'true' at some stage in the future
@@ -561,19 +639,20 @@ var defaultConfig = TabletConfig{
MaxInnoDBTrxHistLen: 1000000,
MaxMySQLReplLagSecs: 43200,
},
+
+ EnablePerWorkloadTableMetrics: false,
}
-// defaultTxThrottlerConfig formats the default throttlerdata.Configuration
-// object in text format. It uses the object returned by
-// throttler.DefaultMaxReplicationLagModuleConfig().Configuration and overrides some of its
-// fields. It panics on error.
-func defaultTxThrottlerConfig() string {
+// defaultTxThrottlerConfig returns the default TxThrottlerConfigFlag object based on
+// a throttler.DefaultMaxReplicationLagModuleConfig().Configuration and overrides some of
+// its fields. It panics on error.
+func defaultTxThrottlerConfig() *TxThrottlerConfigFlag {
// Take throttler.DefaultMaxReplicationLagModuleConfig and override some fields.
config := throttler.DefaultMaxReplicationLagModuleConfig().Configuration
// TODO(erez): Make DefaultMaxReplicationLagModuleConfig() return a MaxReplicationLagSec of 10
// and remove this line.
config.MaxReplicationLagSec = 10
- return prototext.Format(config)
+ return &TxThrottlerConfigFlag{config}
}
func defaultTransactionLimitConfig() TransactionLimitConfig {
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go
index 3235cf31fd2..1eae5218d2a 100644
--- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go
+++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go
@@ -26,7 +26,13 @@ import (
"vitess.io/vitess/go/test/utils"
"vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/throttler"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/yaml2"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
func TestConfigParse(t *testing.T) {
@@ -320,3 +326,127 @@ func TestFlags(t *testing.T) {
want.SanitizeLogMessages = true
assert.Equal(t, want, currentConfig)
}
+
+func TestTxThrottlerConfigFlag(t *testing.T) {
+ f := NewTxThrottlerConfigFlag()
+ defaultMaxReplicationLagModuleConfig := throttler.DefaultMaxReplicationLagModuleConfig().Configuration
+
+ {
+ assert.Nil(t, f.Set(defaultMaxReplicationLagModuleConfig.String()))
+ assert.Equal(t, defaultMaxReplicationLagModuleConfig.String(), f.String())
+ assert.Equal(t, "string", f.Type())
+ }
+ {
+ defaultMaxReplicationLagModuleConfig.TargetReplicationLagSec = 5
+ assert.Nil(t, f.Set(defaultMaxReplicationLagModuleConfig.String()))
+ assert.NotNil(t, f.Get())
+ assert.Equal(t, int64(5), f.Get().TargetReplicationLagSec)
+ }
+ {
+ assert.NotNil(t, f.Set("should not parse"))
+ }
+}
+
+func TestVerifyTxThrottlerConfig(t *testing.T) {
+ defaultMaxReplicationLagModuleConfig := throttler.DefaultMaxReplicationLagModuleConfig().Configuration
+ invalidMaxReplicationLagModuleConfig := throttler.DefaultMaxReplicationLagModuleConfig().Configuration
+ invalidMaxReplicationLagModuleConfig.TargetReplicationLagSec = -1
+
+ type testConfig struct {
+ Name string
+ ExpectedErrorCode vtrpcpb.Code
+ //
+ EnableTxThrottler bool
+ TxThrottlerConfig *TxThrottlerConfigFlag
+ TxThrottlerHealthCheckCells []string
+ TxThrottlerTabletTypes *topoproto.TabletTypeListFlag
+ TxThrottlerDefaultPriority int
+ }
+
+ tests := []testConfig{
+ {
+ // default (disabled)
+ Name: "default",
+ EnableTxThrottler: false,
+ },
+ {
+ // enabled with invalid throttler config
+ Name: "enabled invalid config",
+ ExpectedErrorCode: vtrpcpb.Code_INVALID_ARGUMENT,
+ EnableTxThrottler: true,
+ TxThrottlerConfig: &TxThrottlerConfigFlag{invalidMaxReplicationLagModuleConfig},
+ },
+ {
+ // enabled with good config (default/replica tablet type)
+ Name: "enabled",
+ EnableTxThrottler: true,
+ TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig},
+ TxThrottlerHealthCheckCells: []string{"cell1"},
+ },
+ {
+ // enabled + replica and rdonly tablet types
+ Name: "enabled plus rdonly",
+ EnableTxThrottler: true,
+ TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig},
+ TxThrottlerHealthCheckCells: []string{"cell1"},
+ TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_RDONLY,
+ },
+ },
+ {
+ // enabled without tablet types
+ Name: "enabled without tablet types",
+ ExpectedErrorCode: vtrpcpb.Code_FAILED_PRECONDITION,
+ EnableTxThrottler: true,
+ TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig},
+ TxThrottlerHealthCheckCells: []string{"cell1"},
+ TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{},
+ },
+ {
+ // enabled + disallowed tablet type
+ Name: "enabled disallowed tablet type",
+ ExpectedErrorCode: vtrpcpb.Code_INVALID_ARGUMENT,
+ EnableTxThrottler: true,
+ TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig},
+ TxThrottlerHealthCheckCells: []string{"cell1"},
+ TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{topodatapb.TabletType_DRAINED},
+ },
+ {
+ // enabled + disallowed priority
+ Name: "enabled disallowed priority",
+ ExpectedErrorCode: vtrpcpb.Code_INVALID_ARGUMENT,
+ EnableTxThrottler: true,
+ TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig},
+ TxThrottlerDefaultPriority: 12345,
+ TxThrottlerHealthCheckCells: []string{"cell1"},
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.Name, func(t *testing.T) {
+ t.Parallel()
+
+ config := defaultConfig
+ config.EnableTxThrottler = test.EnableTxThrottler
+ if test.TxThrottlerConfig == nil {
+ test.TxThrottlerConfig = NewTxThrottlerConfigFlag()
+ }
+ config.TxThrottlerConfig = test.TxThrottlerConfig
+ config.TxThrottlerHealthCheckCells = test.TxThrottlerHealthCheckCells
+ config.TxThrottlerDefaultPriority = test.TxThrottlerDefaultPriority
+ if test.TxThrottlerTabletTypes != nil {
+ config.TxThrottlerTabletTypes = test.TxThrottlerTabletTypes
+ }
+
+ err := config.verifyTxThrottlerConfig()
+ if test.ExpectedErrorCode == vtrpcpb.Code_OK {
+ assert.Nil(t, err)
+ } else {
+ assert.NotNil(t, err)
+ assert.Equal(t, test.ExpectedErrorCode, vterrors.Code(err))
+ }
+ })
+ }
+}
diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go
index c00054157c2..cc3160fe1a9 100644
--- a/go/vt/vttablet/tabletserver/tabletserver.go
+++ b/go/vt/vttablet/tabletserver/tabletserver.go
@@ -112,7 +112,7 @@ type TabletServer struct {
tracker *schema.Tracker
watcher *BinlogWatcher
qe *QueryEngine
- txThrottler *txthrottler.TxThrottler
+ txThrottler txthrottler.TxThrottler
te *TxEngine
messager *messager.Engine
hs *healthStreamer
@@ -179,7 +179,7 @@ func NewTabletServer(name string, config *tabletenv.TabletConfig, topoServer *to
tsv.tracker = schema.NewTracker(tsv, tsv.vstreamer, tsv.se)
tsv.watcher = NewBinlogWatcher(tsv, tsv.vstreamer, tsv.config)
tsv.qe = NewQueryEngine(tsv, tsv.se)
- tsv.txThrottler = txthrottler.NewTxThrottler(tsv.config, topoServer)
+ tsv.txThrottler = txthrottler.NewTxThrottler(tsv, topoServer)
tsv.te = NewTxEngine(tsv)
tsv.messager = messager.NewEngine(tsv, tsv.se, tsv.vstreamer)
@@ -488,8 +488,8 @@ func (tsv *TabletServer) begin(ctx context.Context, target *querypb.Target, save
target, options, false, /* allowOnShutdown */
func(ctx context.Context, logStats *tabletenv.LogStats) error {
startTime := time.Now()
- if tsv.txThrottler.Throttle() {
- return vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction throttled")
+ if tsv.txThrottler.Throttle(tsv.getPriorityFromOptions(options), options.GetWorkloadName()) {
+ return errTxThrottled
}
var connSetting *pools.Setting
if len(settings) > 0 {
@@ -520,6 +520,30 @@ func (tsv *TabletServer) begin(ctx context.Context, target *querypb.Target, save
return state, err
}
+func (tsv *TabletServer) getPriorityFromOptions(options *querypb.ExecuteOptions) int {
+ priority := tsv.config.TxThrottlerDefaultPriority
+ if options == nil {
+ return priority
+ }
+ if options.Priority == "" {
+ return priority
+ }
+
+ optionsPriority, err := strconv.Atoi(options.Priority)
+ // This should never error out, as the value for Priority has been validated in the vtgate already.
+ // Still, handle it just to make sure.
+ if err != nil {
+ log.Errorf(
+ "The value of the %s query directive could not be converted to integer, using the "+
+ "default value. Error was: %s",
+ sqlparser.DirectivePriority, priority, err)
+
+ return priority
+ }
+
+ return optionsPriority
+}
+
// Commit commits the specified transaction.
func (tsv *TabletServer) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (newReservedID int64, err error) {
err = tsv.execRequest(
@@ -746,6 +770,7 @@ func (tsv *TabletServer) execute(ctx context.Context, target *querypb.Target, sq
bindVariables = make(map[string]*querypb.BindVariable)
}
query, comments := sqlparser.SplitMarginComments(sql)
+
plan, err := tsv.qe.GetPlan(ctx, logStats, query, skipQueryPlanCache(options))
if err != nil {
return err
@@ -863,6 +888,8 @@ func (tsv *TabletServer) streamExecute(ctx context.Context, target *querypb.Targ
if transactionID != 0 {
connID = transactionID
}
+ logStats.ReservedID = reservedID
+ logStats.TransactionID = transactionID
var connSetting *pools.Setting
if len(settings) > 0 {
@@ -1276,7 +1303,7 @@ func (tsv *TabletServer) ReserveExecute(ctx context.Context, target *querypb.Tar
return state, nil, err
}
- result, err = tsv.execute(ctx, target, sql, bindVariables, state.ReservedID, state.ReservedID, nil, options)
+ result, err = tsv.execute(ctx, target, sql, bindVariables, transactionID, state.ReservedID, nil, options)
return state, result, err
}
@@ -1326,7 +1353,7 @@ func (tsv *TabletServer) ReserveStreamExecute(
return state, err
}
- err = tsv.streamExecute(ctx, target, sql, bindVariables, state.ReservedID, state.ReservedID, nil, options, callback)
+ err = tsv.streamExecute(ctx, target, sql, bindVariables, transactionID, state.ReservedID, nil, options, callback)
return state, err
}
@@ -1401,6 +1428,7 @@ func (tsv *TabletServer) execRequest(
span, ctx := trace.NewSpan(ctx, "TabletServer."+requestName)
if options != nil {
span.Annotate("isolation-level", options.TransactionIsolation)
+ span.Annotate("workload_name", options.WorkloadName)
}
trace.AnnotateSQL(span, sqlparser.Preview(sql))
if target != nil {
@@ -1408,6 +1436,7 @@ func (tsv *TabletServer) execRequest(
span.Annotate("shard", target.Shard)
span.Annotate("keyspace", target.Keyspace)
}
+
defer span.Finish()
logStats := tabletenv.NewLogStats(ctx, requestName)
diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go
index 36e983d2ecf..7d930466f4d 100644
--- a/go/vt/vttablet/tabletserver/tabletserver_test.go
+++ b/go/vt/vttablet/tabletserver/tabletserver_test.go
@@ -761,6 +761,61 @@ func TestTabletServerStreamExecuteComments(t *testing.T) {
}
}
+func TestTabletServerBeginStreamExecute(t *testing.T) {
+ db, tsv := setupTabletServerTest(t, "")
+ defer tsv.StopService()
+ defer db.Close()
+
+ executeSQL := "select * from test_table limit 1000"
+ executeSQLResult := &sqltypes.Result{
+ Fields: []*querypb.Field{
+ {Type: sqltypes.VarBinary},
+ },
+ Rows: [][]sqltypes.Value{
+ {sqltypes.NewVarBinary("row01")},
+ },
+ }
+ db.AddQuery(executeSQL, executeSQLResult)
+
+ target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}
+ callback := func(*sqltypes.Result) error { return nil }
+ state, err := tsv.BeginStreamExecute(ctx, &target, nil, executeSQL, nil, 0, nil, callback)
+ if err != nil {
+ t.Fatalf("TabletServer.BeginStreamExecute should success: %s, but get error: %v",
+ executeSQL, err)
+ }
+ require.NoError(t, err)
+ _, err = tsv.Commit(ctx, &target, state.TransactionID)
+ require.NoError(t, err)
+}
+
+func TestTabletServerBeginStreamExecuteWithError(t *testing.T) {
+ db, tsv := setupTabletServerTest(t, "")
+ defer tsv.StopService()
+ defer db.Close()
+
+ // Enforce an error so we can validate we get one back properly
+ tsv.qe.strictTableACL = true
+
+ executeSQL := "select * from test_table limit 1000"
+ executeSQLResult := &sqltypes.Result{
+ Fields: []*querypb.Field{
+ {Type: sqltypes.VarBinary},
+ },
+ Rows: [][]sqltypes.Value{
+ {sqltypes.NewVarBinary("row01")},
+ },
+ }
+ db.AddQuery(executeSQL, executeSQLResult)
+
+ target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}
+ callback := func(*sqltypes.Result) error { return nil }
+ state, err := tsv.BeginStreamExecute(ctx, &target, nil, executeSQL, nil, 0, nil, callback)
+ require.Error(t, err)
+ err = tsv.Release(ctx, &target, state.TransactionID, 0)
+ require.NoError(t, err)
+}
+
func TestSerializeTransactionsSameRow(t *testing.T) {
// This test runs three transaction in parallel:
// tx1 | tx2 | tx3
diff --git a/go/vt/vttablet/tabletserver/throttle/client.go b/go/vt/vttablet/tabletserver/throttle/client.go
index 10fe910d264..240ac7a0260 100644
--- a/go/vt/vttablet/tabletserver/throttle/client.go
+++ b/go/vt/vttablet/tabletserver/throttle/client.go
@@ -50,7 +50,8 @@ type Client struct {
checkType ThrottleCheckType
flags CheckFlags
- lastSuccessfulThrottle int64
+ lastSuccessfulThrottleMu sync.Mutex
+ lastSuccessfulThrottle int64
}
// NewProductionClient creates a client suitable for foreground/production jobs, which have normal priority.
@@ -94,6 +95,8 @@ func (c *Client) ThrottleCheckOK(ctx context.Context, overrideAppName string) (t
// no throttler
return true
}
+ c.lastSuccessfulThrottleMu.Lock()
+ defer c.lastSuccessfulThrottleMu.Unlock()
if c.lastSuccessfulThrottle >= atomic.LoadInt64(&throttleTicks) {
// if last check was OK just very recently there is no need to check again
return true
diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go
index 0ba300c2c76..a91d171c512 100644
--- a/go/vt/vttablet/tabletserver/throttle/throttler.go
+++ b/go/vt/vttablet/tabletserver/throttle/throttler.go
@@ -508,6 +508,7 @@ func (throttler *Throttler) generateTabletHTTPProbeFunction(ctx context.Context,
mySQLThrottleMetric.Err = err
return mySQLThrottleMetric
}
+ defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
if err != nil {
mySQLThrottleMetric.Err = err
diff --git a/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go
index 17f21f7690b..84254003496 100644
--- a/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go
+++ b/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go
@@ -8,7 +8,7 @@ import (
context "context"
reflect "reflect"
- gomock "github.com/golang/mock/gomock"
+ gomock "go.uber.org/mock/gomock"
discovery "vitess.io/vitess/go/vt/discovery"
query "vitess.io/vitess/go/vt/proto/query"
diff --git a/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go
index 53b827d591a..327a37dc43f 100644
--- a/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go
+++ b/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go
@@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler (interfaces: ThrottlerInterface)
+// Source: vitess.io/vitess/go/vt/throttler (interfaces: Throttler)
// Package txthrottler is a generated GoMock package.
package txthrottler
@@ -8,49 +8,51 @@ import (
reflect "reflect"
time "time"
- gomock "github.com/golang/mock/gomock"
+ gomock "go.uber.org/mock/gomock"
discovery "vitess.io/vitess/go/vt/discovery"
throttlerdata "vitess.io/vitess/go/vt/proto/throttlerdata"
+ topodata "vitess.io/vitess/go/vt/proto/topodata"
+ throttler "vitess.io/vitess/go/vt/throttler"
)
-// MockThrottlerInterface is a mock of ThrottlerInterface interface.
-type MockThrottlerInterface struct {
+// MockThrottler is a mock of Throttler interface.
+type MockThrottler struct {
ctrl *gomock.Controller
- recorder *MockThrottlerInterfaceMockRecorder
+ recorder *MockThrottlerMockRecorder
}
-// MockThrottlerInterfaceMockRecorder is the mock recorder for MockThrottlerInterface.
-type MockThrottlerInterfaceMockRecorder struct {
- mock *MockThrottlerInterface
+// MockThrottlerMockRecorder is the mock recorder for MockThrottler.
+type MockThrottlerMockRecorder struct {
+ mock *MockThrottler
}
-// NewMockThrottlerInterface creates a new mock instance.
-func NewMockThrottlerInterface(ctrl *gomock.Controller) *MockThrottlerInterface {
- mock := &MockThrottlerInterface{ctrl: ctrl}
- mock.recorder = &MockThrottlerInterfaceMockRecorder{mock}
+// NewMockThrottler creates a new mock instance.
+func NewMockThrottler(ctrl *gomock.Controller) *MockThrottler {
+ mock := &MockThrottler{ctrl: ctrl}
+ mock.recorder = &MockThrottlerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockThrottlerInterface) EXPECT() *MockThrottlerInterfaceMockRecorder {
+func (m *MockThrottler) EXPECT() *MockThrottlerMockRecorder {
return m.recorder
}
// Close mocks base method.
-func (m *MockThrottlerInterface) Close() {
+func (m *MockThrottler) Close() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Close")
}
// Close indicates an expected call of Close.
-func (mr *MockThrottlerInterfaceMockRecorder) Close() *gomock.Call {
+func (mr *MockThrottlerMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockThrottlerInterface)(nil).Close))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockThrottler)(nil).Close))
}
// GetConfiguration mocks base method.
-func (m *MockThrottlerInterface) GetConfiguration() *throttlerdata.Configuration {
+func (m *MockThrottler) GetConfiguration() *throttlerdata.Configuration {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetConfiguration")
ret0, _ := ret[0].(*throttlerdata.Configuration)
@@ -58,13 +60,41 @@ func (m *MockThrottlerInterface) GetConfiguration() *throttlerdata.Configuration
}
// GetConfiguration indicates an expected call of GetConfiguration.
-func (mr *MockThrottlerInterfaceMockRecorder) GetConfiguration() *gomock.Call {
+func (mr *MockThrottlerMockRecorder) GetConfiguration() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfiguration", reflect.TypeOf((*MockThrottlerInterface)(nil).GetConfiguration))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfiguration", reflect.TypeOf((*MockThrottler)(nil).GetConfiguration))
+}
+
+// Log mocks base method.
+func (m *MockThrottler) Log() []throttler.Result {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Log")
+ ret0, _ := ret[0].([]throttler.Result)
+ return ret0
+}
+
+// Log indicates an expected call of Log.
+func (mr *MockThrottlerMockRecorder) Log() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log", reflect.TypeOf((*MockThrottler)(nil).Log))
+}
+
+// MaxLag mocks base method.
+func (m *MockThrottler) MaxLag(arg0 topodata.TabletType) uint32 {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MaxLag", arg0)
+ ret0, _ := ret[0].(uint32)
+ return ret0
+}
+
+// MaxLag indicates an expected call of MaxLag.
+func (mr *MockThrottlerMockRecorder) MaxLag(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxLag", reflect.TypeOf((*MockThrottler)(nil).MaxLag), arg0)
}
// MaxRate mocks base method.
-func (m *MockThrottlerInterface) MaxRate() int64 {
+func (m *MockThrottler) MaxRate() int64 {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MaxRate")
ret0, _ := ret[0].(int64)
@@ -72,61 +102,61 @@ func (m *MockThrottlerInterface) MaxRate() int64 {
}
// MaxRate indicates an expected call of MaxRate.
-func (mr *MockThrottlerInterfaceMockRecorder) MaxRate() *gomock.Call {
+func (mr *MockThrottlerMockRecorder) MaxRate() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxRate", reflect.TypeOf((*MockThrottlerInterface)(nil).MaxRate))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxRate", reflect.TypeOf((*MockThrottler)(nil).MaxRate))
}
// RecordReplicationLag mocks base method.
-func (m *MockThrottlerInterface) RecordReplicationLag(arg0 time.Time, arg1 *discovery.TabletHealth) {
+func (m *MockThrottler) RecordReplicationLag(arg0 time.Time, arg1 *discovery.TabletHealth) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "RecordReplicationLag", arg0, arg1)
}
// RecordReplicationLag indicates an expected call of RecordReplicationLag.
-func (mr *MockThrottlerInterfaceMockRecorder) RecordReplicationLag(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockThrottlerMockRecorder) RecordReplicationLag(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordReplicationLag", reflect.TypeOf((*MockThrottlerInterface)(nil).RecordReplicationLag), arg0, arg1)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordReplicationLag", reflect.TypeOf((*MockThrottler)(nil).RecordReplicationLag), arg0, arg1)
}
// ResetConfiguration mocks base method.
-func (m *MockThrottlerInterface) ResetConfiguration() {
+func (m *MockThrottler) ResetConfiguration() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "ResetConfiguration")
}
// ResetConfiguration indicates an expected call of ResetConfiguration.
-func (mr *MockThrottlerInterfaceMockRecorder) ResetConfiguration() *gomock.Call {
+func (mr *MockThrottlerMockRecorder) ResetConfiguration() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetConfiguration", reflect.TypeOf((*MockThrottlerInterface)(nil).ResetConfiguration))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetConfiguration", reflect.TypeOf((*MockThrottler)(nil).ResetConfiguration))
}
// SetMaxRate mocks base method.
-func (m *MockThrottlerInterface) SetMaxRate(arg0 int64) {
+func (m *MockThrottler) SetMaxRate(arg0 int64) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetMaxRate", arg0)
}
// SetMaxRate indicates an expected call of SetMaxRate.
-func (mr *MockThrottlerInterfaceMockRecorder) SetMaxRate(arg0 interface{}) *gomock.Call {
+func (mr *MockThrottlerMockRecorder) SetMaxRate(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMaxRate", reflect.TypeOf((*MockThrottlerInterface)(nil).SetMaxRate), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMaxRate", reflect.TypeOf((*MockThrottler)(nil).SetMaxRate), arg0)
}
// ThreadFinished mocks base method.
-func (m *MockThrottlerInterface) ThreadFinished(arg0 int) {
+func (m *MockThrottler) ThreadFinished(arg0 int) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "ThreadFinished", arg0)
}
// ThreadFinished indicates an expected call of ThreadFinished.
-func (mr *MockThrottlerInterfaceMockRecorder) ThreadFinished(arg0 interface{}) *gomock.Call {
+func (mr *MockThrottlerMockRecorder) ThreadFinished(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ThreadFinished", reflect.TypeOf((*MockThrottlerInterface)(nil).ThreadFinished), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ThreadFinished", reflect.TypeOf((*MockThrottler)(nil).ThreadFinished), arg0)
}
// Throttle mocks base method.
-func (m *MockThrottlerInterface) Throttle(arg0 int) time.Duration {
+func (m *MockThrottler) Throttle(arg0 int) time.Duration {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Throttle", arg0)
ret0, _ := ret[0].(time.Duration)
@@ -134,13 +164,13 @@ func (m *MockThrottlerInterface) Throttle(arg0 int) time.Duration {
}
// Throttle indicates an expected call of Throttle.
-func (mr *MockThrottlerInterfaceMockRecorder) Throttle(arg0 interface{}) *gomock.Call {
+func (mr *MockThrottlerMockRecorder) Throttle(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Throttle", reflect.TypeOf((*MockThrottlerInterface)(nil).Throttle), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Throttle", reflect.TypeOf((*MockThrottler)(nil).Throttle), arg0)
}
// UpdateConfiguration mocks base method.
-func (m *MockThrottlerInterface) UpdateConfiguration(arg0 *throttlerdata.Configuration, arg1 bool) error {
+func (m *MockThrottler) UpdateConfiguration(arg0 *throttlerdata.Configuration, arg1 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateConfiguration", arg0, arg1)
ret0, _ := ret[0].(error)
@@ -148,7 +178,7 @@ func (m *MockThrottlerInterface) UpdateConfiguration(arg0 *throttlerdata.Configu
}
// UpdateConfiguration indicates an expected call of UpdateConfiguration.
-func (mr *MockThrottlerInterfaceMockRecorder) UpdateConfiguration(arg0, arg1 interface{}) *gomock.Call {
+func (mr *MockThrottlerMockRecorder) UpdateConfiguration(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConfiguration", reflect.TypeOf((*MockThrottlerInterface)(nil).UpdateConfiguration), arg0, arg1)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConfiguration", reflect.TypeOf((*MockThrottler)(nil).UpdateConfiguration), arg0, arg1)
}
diff --git a/go/vt/vttablet/tabletserver/txthrottler/mock_topology_watcher_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_topology_watcher_test.go
deleted file mode 100644
index 5afb16d3473..00000000000
--- a/go/vt/vttablet/tabletserver/txthrottler/mock_topology_watcher_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler (interfaces: TopologyWatcherInterface)
-
-// Package txthrottler is a generated GoMock package.
-package txthrottler
-
-import (
- reflect "reflect"
-
- gomock "github.com/golang/mock/gomock"
-)
-
-// MockTopologyWatcherInterface is a mock of TopologyWatcherInterface interface.
-type MockTopologyWatcherInterface struct {
- ctrl *gomock.Controller
- recorder *MockTopologyWatcherInterfaceMockRecorder
-}
-
-// MockTopologyWatcherInterfaceMockRecorder is the mock recorder for MockTopologyWatcherInterface.
-type MockTopologyWatcherInterfaceMockRecorder struct {
- mock *MockTopologyWatcherInterface
-}
-
-// NewMockTopologyWatcherInterface creates a new mock instance.
-func NewMockTopologyWatcherInterface(ctrl *gomock.Controller) *MockTopologyWatcherInterface {
- mock := &MockTopologyWatcherInterface{ctrl: ctrl}
- mock.recorder = &MockTopologyWatcherInterfaceMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockTopologyWatcherInterface) EXPECT() *MockTopologyWatcherInterfaceMockRecorder {
- return m.recorder
-}
-
-// Start mocks base method.
-func (m *MockTopologyWatcherInterface) Start() {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "Start")
-}
-
-// Start indicates an expected call of Start.
-func (mr *MockTopologyWatcherInterfaceMockRecorder) Start() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockTopologyWatcherInterface)(nil).Start))
-}
-
-// Stop mocks base method.
-func (m *MockTopologyWatcherInterface) Stop() {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "Stop")
-}
-
-// Stop indicates an expected call of Stop.
-func (mr *MockTopologyWatcherInterfaceMockRecorder) Stop() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockTopologyWatcherInterface)(nil).Stop))
-}
diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go
index 5b724ca97cf..70d92aad3a7 100644
--- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go
+++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go
@@ -17,29 +17,81 @@ limitations under the License.
package txthrottler
import (
- "fmt"
+ "context"
+ "math/rand"
+ "reflect"
"strings"
"sync"
+ "sync/atomic"
"time"
"google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/encoding/prototext"
-
- "context"
-
+ "vitess.io/vitess/go/stats"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/throttler"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
querypb "vitess.io/vitess/go/vt/proto/query"
- throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
)
-// TxThrottler throttles transactions based on replication lag.
+// These vars store the functions used to create the topo server, healthcheck,
+// and go/vt/throttler. These are provided here so that they can be overridden
+// in tests to generate mocks.
+type healthCheckFactoryFunc func(ctx context.Context, topoServer *topo.Server, cell, keyspace, shard string, cellsToWatch []string) (discovery.HealthCheck, error)
+type throttlerFactoryFunc func(name, unit string, threadCount int, maxRate int64, maxReplicationLagConfig throttler.MaxReplicationLagModuleConfig) (throttler.Throttler, error)
+
+var (
+ healthCheckFactory healthCheckFactoryFunc
+ throttlerFactory throttlerFactoryFunc
+)
+
+func resetTxThrottlerFactories() {
+ healthCheckFactory = func(ctx context.Context, topoServer *topo.Server, cell, keyspace, shard string, cellsToWatch []string) (discovery.HealthCheck, error) {
+ // discovery.NewFilterByShard expects a single-shard filter to be in "keyspace|shard" format.
+ filter, err := discovery.NewFilterByShard([]string{keyspace + "|" + shard})
+ if err != nil {
+ return nil, err
+ }
+ return discovery.NewHealthCheck(ctx, discovery.DefaultHealthCheckRetryDelay, discovery.DefaultHealthCheckTimeout, topoServer, cell, strings.Join(cellsToWatch, ","), filter), nil
+ }
+ throttlerFactory = func(name, unit string, threadCount int, maxRate int64, maxReplicationLagConfig throttler.MaxReplicationLagModuleConfig) (throttler.Throttler, error) {
+ return throttler.NewThrottlerFromConfig(name, unit, threadCount, maxRate, maxReplicationLagConfig, time.Now)
+ }
+}
+
+func init() {
+ resetTxThrottlerFactories()
+}
+
+// TxThrottler defines the interface for the transaction throttler.
+type TxThrottler interface {
+ InitDBConfig(target *querypb.Target)
+ Open() (err error)
+ Close()
+ Throttle(priority int, workload string) (result bool)
+}
+
+// TxThrottlerName is the name the wrapped go/vt/throttler object will be registered with
+// go/vt/throttler.GlobalManager.
+const TxThrottlerName = "TransactionThrottler"
+
+// fetchKnownCells gathers a list of known cells from the topology. On error,
+// the cell of the local tablet will be used and an error is logged.
+func fetchKnownCells(ctx context.Context, topoServer *topo.Server, target *querypb.Target) []string {
+ cells, err := topoServer.GetKnownCells(ctx)
+ if err != nil {
+ log.Errorf("txThrottler: falling back to local cell due to error fetching cells from topology: %+v", err)
+ cells = []string{target.Cell}
+ }
+ return cells
+}
+
+// txThrottler implements TxThrottle for throttling transactions based on replication lag.
// It's a thin wrapper around the throttler found in vitess/go/vt/throttler.
// It uses a discovery.HealthCheck to send replication-lag updates to the wrapped throttler.
//
@@ -63,187 +115,112 @@ import (
// // To release the resources used by the throttler the caller should call Close().
// t.Close()
//
-// A TxThrottler object is generally not thread-safe: at any given time at most one goroutine should
+// A txThrottler object is generally not thread-safe: at any given time at most one goroutine should
// be executing a method. The only exception is the 'Throttle' method where multiple goroutines are
// allowed to execute it concurrently.
-type TxThrottler struct {
- // config stores the transaction throttler's configuration.
- // It is populated in NewTxThrottler and is not modified
- // since.
- config *txThrottlerConfig
+type txThrottler struct {
+ config *tabletenv.TabletConfig
// state holds an open transaction throttler state. It is nil
// if the TransactionThrottler is closed.
- state *txThrottlerState
+ state txThrottlerState
- target *querypb.Target
-}
+ target *querypb.Target
+ topoServer *topo.Server
-// NewTxThrottler tries to construct a TxThrottler from the
-// relevant fields in the tabletenv.Config object. It returns a disabled TxThrottler if
-// any error occurs.
-// This function calls tryCreateTxThrottler that does the actual creation work
-// and returns an error if one occurred.
-func NewTxThrottler(config *tabletenv.TabletConfig, topoServer *topo.Server) *TxThrottler {
- txThrottler, err := tryCreateTxThrottler(config, topoServer)
- if err != nil {
- log.Errorf("Error creating transaction throttler. Transaction throttling will"+
- " be disabled. Error: %v", err)
- txThrottler, err = newTxThrottler(&txThrottlerConfig{enabled: false})
- if err != nil {
- panic("BUG: Can't create a disabled transaction throttler")
- }
- } else {
- log.Infof("Initialized transaction throttler with config: %+v", txThrottler.config)
- }
- return txThrottler
+ // stats
+ throttlerRunning *stats.Gauge
+ healthChecksReadTotal *stats.CountersWithMultiLabels
+ healthChecksRecordedTotal *stats.CountersWithMultiLabels
+ requestsTotal *stats.CountersWithSingleLabel
+ requestsThrottled *stats.CountersWithSingleLabel
}
-// InitDBConfig initializes the target parameters for the throttler.
-func (t *TxThrottler) InitDBConfig(target *querypb.Target) {
- t.target = proto.Clone(target).(*querypb.Target)
+type txThrottlerState interface {
+ deallocateResources()
+ StatsUpdate(tabletStats *discovery.TabletHealth)
+ throttle() bool
}
-func tryCreateTxThrottler(config *tabletenv.TabletConfig, topoServer *topo.Server) (*TxThrottler, error) {
- if !config.EnableTxThrottler {
- return newTxThrottler(&txThrottlerConfig{enabled: false})
- }
-
- var throttlerConfig throttlerdatapb.Configuration
- if err := prototext.Unmarshal([]byte(config.TxThrottlerConfig), &throttlerConfig); err != nil {
- return nil, err
- }
-
- // Clone tsv.TxThrottlerHealthCheckCells so that we don't assume tsv.TxThrottlerHealthCheckCells
- // is immutable.
- healthCheckCells := make([]string, len(config.TxThrottlerHealthCheckCells))
- copy(healthCheckCells, config.TxThrottlerHealthCheckCells)
+// txThrottlerStateImpl holds the state of an open TxThrottler object.
+type txThrottlerStateImpl struct {
+ config *tabletenv.TabletConfig
+ txThrottler *txThrottler
- return newTxThrottler(&txThrottlerConfig{
- enabled: true,
- topoServer: topoServer,
- throttlerConfig: &throttlerConfig,
- healthCheckCells: healthCheckCells,
- })
-}
-
-// txThrottlerConfig holds the parameters that need to be
-// passed when constructing a TxThrottler object.
-type txThrottlerConfig struct {
- // enabled is true if the transaction throttler is enabled. All methods
- // of a disabled transaction throttler do nothing and Throttle() always
- // returns false.
- enabled bool
-
- topoServer *topo.Server
- throttlerConfig *throttlerdatapb.Configuration
- // healthCheckCells stores the cell names in which running vttablets will be monitored for
- // replication lag.
- healthCheckCells []string
-}
-
-// ThrottlerInterface defines the public interface that is implemented by go/vt/throttler.Throttler
-// It is only used here to allow mocking out a throttler object.
-type ThrottlerInterface interface {
- Throttle(threadID int) time.Duration
- ThreadFinished(threadID int)
- Close()
- MaxRate() int64
- SetMaxRate(rate int64)
- RecordReplicationLag(time time.Time, th *discovery.TabletHealth)
- GetConfiguration() *throttlerdatapb.Configuration
- UpdateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error
- ResetConfiguration()
-}
-
-// TopologyWatcherInterface defines the public interface that is implemented by
-// discovery.LegacyTopologyWatcher. It is only used here to allow mocking out
-// go/vt/discovery.LegacyTopologyWatcher.
-type TopologyWatcherInterface interface {
- Start()
- Stop()
-}
-
-// txThrottlerState holds the state of an open TxThrottler object.
-type txThrottlerState struct {
// throttleMu serializes calls to throttler.Throttler.Throttle(threadId).
// That method is required to be called in serial for each threadId.
- throttleMu sync.Mutex
- throttler ThrottlerInterface
- stopHealthCheck context.CancelFunc
+ throttleMu sync.Mutex
+ throttler throttler.Throttler
+
+ ctx context.Context
+ cancel context.CancelFunc
healthCheck discovery.HealthCheck
- topologyWatchers []TopologyWatcherInterface
-}
+ healthCheckChan chan *discovery.TabletHealth
+ healthCheckCells []string
+ cellsFromTopo bool
-// These vars store the functions used to create the topo server, healthcheck,
-// topology watchers and go/vt/throttler. These are provided here so that they can be overridden
-// in tests to generate mocks.
-type healthCheckFactoryFunc func(topoServer *topo.Server, cell string, cellsToWatch []string) discovery.HealthCheck
-type topologyWatcherFactoryFunc func(topoServer *topo.Server, hc discovery.HealthCheck, cell, keyspace, shard string, refreshInterval time.Duration, topoReadConcurrency int) TopologyWatcherInterface
-type throttlerFactoryFunc func(name, unit string, threadCount int, maxRate, maxReplicationLag int64) (ThrottlerInterface, error)
+ // tabletTypes stores the tablet types for throttling
+ tabletTypes map[topodatapb.TabletType]bool
-var (
- healthCheckFactory healthCheckFactoryFunc
- topologyWatcherFactory topologyWatcherFactoryFunc
- throttlerFactory throttlerFactoryFunc
-)
-
-func init() {
- resetTxThrottlerFactories()
+ maxLag int64
+ done chan bool
+ waitForTermination sync.WaitGroup
}
-func resetTxThrottlerFactories() {
- healthCheckFactory = func(topoServer *topo.Server, cell string, cellsToWatch []string) discovery.HealthCheck {
- return discovery.NewHealthCheck(context.Background(), discovery.DefaultHealthCheckRetryDelay, discovery.DefaultHealthCheckTimeout, topoServer, cell, strings.Join(cellsToWatch, ","))
- }
- topologyWatcherFactory = func(topoServer *topo.Server, hc discovery.HealthCheck, cell, keyspace, shard string, refreshInterval time.Duration, topoReadConcurrency int) TopologyWatcherInterface {
- return discovery.NewCellTabletsWatcher(context.Background(), topoServer, hc, discovery.NewFilterByKeyspace([]string{keyspace}), cell, refreshInterval, true, topoReadConcurrency)
+// NewTxThrottler tries to construct a txThrottler from the relevant
+// fields in the tabletenv.Env and topo.Server objects.
+func NewTxThrottler(env tabletenv.Env, topoServer *topo.Server) TxThrottler {
+ config := env.Config()
+ if config.EnableTxThrottler {
+ if len(config.TxThrottlerHealthCheckCells) == 0 {
+ defer log.Infof("Initialized transaction throttler using tabletTypes: %+v, cellsFromTopo: true, topoRefreshInterval: %s, throttlerConfig: %q",
+ config.TxThrottlerTabletTypes, config.TxThrottlerTopoRefreshInterval, config.TxThrottlerConfig.Get(),
+ )
+ } else {
+ defer log.Infof("Initialized transaction throttler using tabletTypes: %+v, healthCheckCells: %+v, throttlerConfig: %q",
+ config.TxThrottlerTabletTypes, config.TxThrottlerHealthCheckCells, config.TxThrottlerConfig.Get(),
+ )
+ }
}
- throttlerFactory = func(name, unit string, threadCount int, maxRate, maxReplicationLag int64) (ThrottlerInterface, error) {
- return throttler.NewThrottler(name, unit, threadCount, maxRate, maxReplicationLag)
+
+ return &txThrottler{
+ config: config,
+ topoServer: topoServer,
+ throttlerRunning: env.Exporter().NewGauge(TxThrottlerName+"Running", "transaction throttler running state"),
+ healthChecksReadTotal: env.Exporter().NewCountersWithMultiLabels(TxThrottlerName+"HealthchecksRead", "transaction throttler healthchecks read",
+ []string{"cell", "DbType"}),
+ healthChecksRecordedTotal: env.Exporter().NewCountersWithMultiLabels(TxThrottlerName+"HealthchecksRecorded", "transaction throttler healthchecks recorded",
+ []string{"cell", "DbType"}),
+ requestsTotal: env.Exporter().NewCountersWithSingleLabel(TxThrottlerName+"Requests", "transaction throttler requests", "workload"),
+ requestsThrottled: env.Exporter().NewCountersWithSingleLabel(TxThrottlerName+"Throttled", "transaction throttler requests throttled", "workload"),
}
}
-// TxThrottlerName is the name the wrapped go/vt/throttler object will be registered with
-// go/vt/throttler.GlobalManager.
-const TxThrottlerName = "TransactionThrottler"
-
-func newTxThrottler(config *txThrottlerConfig) (*TxThrottler, error) {
- if config.enabled {
- // Verify config.
- err := throttler.MaxReplicationLagModuleConfig{Configuration: config.throttlerConfig}.Verify()
- if err != nil {
- return nil, err
- }
- if len(config.healthCheckCells) == 0 {
- return nil, fmt.Errorf("empty healthCheckCells given. %+v", config)
- }
- }
- return &TxThrottler{
- config: config,
- }, nil
+// InitDBConfig initializes the target parameters for the throttler.
+func (t *txThrottler) InitDBConfig(target *querypb.Target) {
+ t.target = proto.Clone(target).(*querypb.Target)
}
// Open opens the transaction throttler. It must be called prior to 'Throttle'.
-func (t *TxThrottler) Open() error {
- if !t.config.enabled {
+func (t *txThrottler) Open() (err error) {
+ if !t.config.EnableTxThrottler {
return nil
}
if t.state != nil {
return nil
}
- log.Info("TxThrottler: opening")
- var err error
- t.state, err = newTxThrottlerState(t.config, t.target.Keyspace, t.target.Shard, t.target.Cell)
+ log.Info("txThrottler: opening")
+ t.throttlerRunning.Set(1)
+ t.state, err = newTxThrottlerState(t, t.config, t.target)
return err
}
-// Close closes the TxThrottler object and releases resources.
+// Close closes the txThrottler object and releases resources.
// It should be called after the throttler is no longer needed.
// It's ok to call this method on a closed throttler--in which case the method does nothing.
-func (t *TxThrottler) Close() {
- if !t.config.enabled {
+func (t *txThrottler) Close() {
+ if !t.config.EnableTxThrottler {
return
}
if t.state == nil {
@@ -251,114 +228,206 @@ func (t *TxThrottler) Close() {
}
t.state.deallocateResources()
t.state = nil
- log.Info("TxThrottler: closed")
+ t.throttlerRunning.Set(0)
+ log.Info("txThrottler: closed")
}
// Throttle should be called before a new transaction is started.
// It returns true if the transaction should not proceed (the caller
// should back off). Throttle requires that Open() was previously called
// successfully.
-func (t *TxThrottler) Throttle() (result bool) {
- if !t.config.enabled {
+func (t *txThrottler) Throttle(priority int, workload string) (result bool) {
+ if !t.config.EnableTxThrottler {
return false
}
if t.state == nil {
- panic("BUG: Throttle() called on a closed TxThrottler")
+ return false
+ }
+
+ // Throttle according to both what the throttler state says and the priority. Workloads with lower priority value
+ // are less likely to be throttled.
+ result = rand.Intn(sqlparser.MaxPriorityValue) < priority && t.state.throttle()
+
+ t.requestsTotal.Add(workload, 1)
+ if result {
+ t.requestsThrottled.Add(workload, 1)
}
- return t.state.throttle()
+
+ return result && !t.config.TxThrottlerDryRun
}
-func newTxThrottlerState(config *txThrottlerConfig, keyspace, shard, cell string) (*txThrottlerState, error) {
+func newTxThrottlerState(txThrottler *txThrottler, config *tabletenv.TabletConfig, target *querypb.Target) (txThrottlerState, error) {
+ maxReplicationLagModuleConfig := throttler.MaxReplicationLagModuleConfig{Configuration: config.TxThrottlerConfig.Get()}
+
t, err := throttlerFactory(
TxThrottlerName,
"TPS", /* unit */
1, /* threadCount */
throttler.MaxRateModuleDisabled, /* maxRate */
- config.throttlerConfig.MaxReplicationLagSec /* maxReplicationLag */)
+ maxReplicationLagModuleConfig,
+ )
if err != nil {
return nil, err
}
- if err := t.UpdateConfiguration(config.throttlerConfig, true /* copyZeroValues */); err != nil {
+ if err := t.UpdateConfiguration(config.TxThrottlerConfig.Get(), true /* copyZeroValues */); err != nil {
t.Close()
return nil, err
}
- result := &txThrottlerState{
- throttler: t,
+
+ tabletTypes := make(map[topodatapb.TabletType]bool, len(*config.TxThrottlerTabletTypes))
+ for _, tabletType := range *config.TxThrottlerTabletTypes {
+ tabletTypes[tabletType] = true
}
- createTxThrottlerHealthCheck(config, result, cell)
-
- result.topologyWatchers = make(
- []TopologyWatcherInterface, 0, len(config.healthCheckCells))
- for _, cell := range config.healthCheckCells {
- result.topologyWatchers = append(
- result.topologyWatchers,
- topologyWatcherFactory(
- config.topoServer,
- result.healthCheck,
- cell,
- keyspace,
- shard,
- discovery.DefaultTopologyWatcherRefreshInterval,
- discovery.DefaultTopoReadConcurrency))
+
+ ctx, cancel := context.WithCancel(context.Background())
+ state := &txThrottlerStateImpl{
+ ctx: ctx,
+ cancel: cancel,
+ config: config,
+ healthCheckCells: config.TxThrottlerHealthCheckCells,
+ tabletTypes: tabletTypes,
+ throttler: t,
+ txThrottler: txThrottler,
+ done: make(chan bool, 1),
+ }
+
+ // get cells from topo if none defined in tabletenv config
+ if len(state.healthCheckCells) == 0 {
+ cellsCtx, cellsCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)
+ defer cellsCancel()
+ state.healthCheckCells = fetchKnownCells(cellsCtx, txThrottler.topoServer, target)
+ state.cellsFromTopo = true
}
- return result, nil
+
+ if err := state.initHealthCheckStream(txThrottler.topoServer, target); err != nil {
+ return nil, err
+ }
+ state.healthCheck.RegisterStats()
+ go state.healthChecksProcessor(txThrottler.topoServer, target)
+ state.waitForTermination.Add(1)
+ go state.updateMaxLag()
+
+ return state, nil
}
-func createTxThrottlerHealthCheck(config *txThrottlerConfig, result *txThrottlerState, cell string) {
- ctx, cancel := context.WithCancel(context.Background())
- result.stopHealthCheck = cancel
- result.healthCheck = healthCheckFactory(config.topoServer, cell, config.healthCheckCells)
- ch := result.healthCheck.Subscribe()
- go func(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case th := <-ch:
- result.StatsUpdate(th)
+func (ts *txThrottlerStateImpl) initHealthCheckStream(topoServer *topo.Server, target *querypb.Target) (err error) {
+ ts.healthCheck, err = healthCheckFactory(ts.ctx, topoServer, target.Cell, target.Keyspace, target.Shard, ts.healthCheckCells)
+ if err != nil {
+ return err
+ }
+ ts.healthCheckChan = ts.healthCheck.Subscribe()
+ return nil
+}
+
+func (ts *txThrottlerStateImpl) closeHealthCheckStream() {
+ if ts.healthCheck == nil {
+ return
+ }
+ ts.cancel()
+ ts.healthCheck.Close()
+}
+
+func (ts *txThrottlerStateImpl) updateHealthCheckCells(topoServer *topo.Server, target *querypb.Target) error {
+ fetchCtx, cancel := context.WithTimeout(ts.ctx, topo.RemoteOperationTimeout)
+ defer cancel()
+
+ knownCells := fetchKnownCells(fetchCtx, topoServer, target)
+ if !reflect.DeepEqual(knownCells, ts.healthCheckCells) {
+ log.Info("txThrottler: restarting healthcheck stream due to topology cells update")
+ ts.healthCheckCells = knownCells
+ ts.closeHealthCheckStream()
+ return ts.initHealthCheckStream(topoServer, target)
+ }
+ return nil
+}
+
+func (ts *txThrottlerStateImpl) healthChecksProcessor(topoServer *topo.Server, target *querypb.Target) {
+ var cellsUpdateTicks <-chan time.Time
+ if ts.cellsFromTopo {
+ ticker := time.NewTicker(ts.config.TxThrottlerTopoRefreshInterval)
+ cellsUpdateTicks = ticker.C
+ defer ticker.Stop()
+ }
+ for {
+ select {
+ case <-ts.ctx.Done():
+ return
+ case <-cellsUpdateTicks:
+ if err := ts.updateHealthCheckCells(topoServer, target); err != nil {
+ log.Errorf("txThrottler: failed to update cell list: %+v", err)
}
+ case th := <-ts.healthCheckChan:
+ ts.StatsUpdate(th)
}
- }(ctx)
+ }
}
-func (ts *txThrottlerState) throttle() bool {
+func (ts *txThrottlerStateImpl) throttle() bool {
if ts.throttler == nil {
- panic("BUG: throttle called after deallocateResources was called.")
+ log.Error("txThrottler: throttle called after deallocateResources was called")
+ return false
}
// Serialize calls to ts.throttle.Throttle()
ts.throttleMu.Lock()
defer ts.throttleMu.Unlock()
- return ts.throttler.Throttle(0 /* threadId */) > 0
+
+ maxLag := atomic.LoadInt64(&ts.maxLag)
+
+ return maxLag > ts.config.TxThrottlerConfig.TargetReplicationLagSec &&
+ ts.throttler.Throttle(0 /* threadId */) > 0
}
-func (ts *txThrottlerState) deallocateResources() {
- // We don't really need to nil out the fields here
- // as deallocateResources is not expected to be called
- // more than once, but it doesn't hurt to do so.
- for _, watcher := range ts.topologyWatchers {
- watcher.Stop()
+func (ts *txThrottlerStateImpl) updateMaxLag() {
+ defer ts.waitForTermination.Done()
+ // We use half of the target lag to ensure we have enough resolution to see changes in lag below that value
+ ticker := time.NewTicker(time.Duration(ts.config.TxThrottlerConfig.TargetReplicationLagSec/2) * time.Second)
+ defer ticker.Stop()
+outerloop:
+ for {
+ select {
+ case <-ticker.C:
+ var maxLag uint32
+
+ for tabletType := range ts.tabletTypes {
+ maxLagPerTabletType := ts.throttler.MaxLag(tabletType)
+ if maxLagPerTabletType > maxLag {
+ maxLag = maxLagPerTabletType
+ }
+ }
+ atomic.StoreInt64(&ts.maxLag, int64(maxLag))
+ case <-ts.done:
+ break outerloop
+ }
}
- ts.topologyWatchers = nil
+}
- ts.healthCheck.Close()
+func (ts *txThrottlerStateImpl) deallocateResources() {
+ // Close healthcheck and topo watchers
+ ts.closeHealthCheckStream()
ts.healthCheck = nil
- // After ts.healthCheck is closed txThrottlerState.StatsUpdate() is guaranteed not
+ ts.done <- true
+ ts.waitForTermination.Wait()
+ // After ts.healthCheck is closed txThrottlerStateImpl.StatsUpdate() is guaranteed not
// to be executing, so we can safely close the throttler.
ts.throttler.Close()
ts.throttler = nil
}
// StatsUpdate updates the health of a tablet with the given healthcheck.
-func (ts *txThrottlerState) StatsUpdate(tabletStats *discovery.TabletHealth) {
- // Ignore PRIMARY and RDONLY stats.
- // We currently do not monitor RDONLY tablets for replication lag. RDONLY tablets are not
- // candidates for becoming primary during failover, and it's acceptable to serve somewhat
- // stale date from these.
- // TODO(erez): If this becomes necessary, we can add a configuration option that would
- // determine whether we consider RDONLY tablets here, as well.
- if tabletStats.Target.TabletType != topodatapb.TabletType_REPLICA {
+func (ts *txThrottlerStateImpl) StatsUpdate(tabletStats *discovery.TabletHealth) {
+ if len(ts.tabletTypes) == 0 {
return
}
- ts.throttler.RecordReplicationLag(time.Now(), tabletStats)
+
+ tabletType := tabletStats.Target.TabletType
+ metricLabels := []string{tabletStats.Target.Cell, tabletType.String()}
+ ts.txThrottler.healthChecksReadTotal.Add(metricLabels, 1)
+
+ // Monitor tablets for replication lag if they have a tablet
+ // type specified by the --tx-throttler-tablet-types flag.
+ if ts.tabletTypes[tabletType] {
+ ts.throttler.RecordReplicationLag(time.Now(), tabletStats)
+ ts.txThrottler.healthChecksRecordedTotal.Add(metricLabels, 1)
+ }
}
diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go
index 1606fa2cf4c..c595224cb81 100644
--- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go
+++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go
@@ -18,18 +18,22 @@ package txthrottler
// Commands to generate the mocks for this test.
//go:generate mockgen -destination mock_healthcheck_test.go -package txthrottler -mock_names "HealthCheck=MockHealthCheck" vitess.io/vitess/go/vt/discovery HealthCheck
-//go:generate mockgen -destination mock_throttler_test.go -package txthrottler vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler ThrottlerInterface
-//go:generate mockgen -destination mock_topology_watcher_test.go -package txthrottler vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler TopologyWatcherInterface
+//go:generate mockgen -destination mock_throttler_test.go -package txthrottler vitess.io/vitess/go/vt/throttler Throttler
import (
+ "context"
+ "sync/atomic"
"testing"
"time"
- "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/mock/gomock"
"vitess.io/vitess/go/vt/discovery"
+ "vitess.io/vitess/go/vt/throttler"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -39,17 +43,16 @@ import (
func TestDisabledThrottler(t *testing.T) {
config := tabletenv.NewDefaultConfig()
config.EnableTxThrottler = false
- throttler := NewTxThrottler(config, nil)
+ env := tabletenv.NewEnv(config, t.Name())
+ throttler := NewTxThrottler(env, nil)
throttler.InitDBConfig(&querypb.Target{
Keyspace: "keyspace",
Shard: "shard",
})
- if err := throttler.Open(); err != nil {
- t.Fatalf("want: nil, got: %v", err)
- }
- if result := throttler.Throttle(); result != false {
- t.Errorf("want: false, got: %v", result)
- }
+ assert.Nil(t, throttler.Open())
+ assert.False(t, throttler.Throttle(0, "some-workload"))
+ throttlerImpl, _ := throttler.(*txThrottler)
+ assert.Zero(t, throttlerImpl.throttlerRunning.Get())
throttler.Close()
}
@@ -63,84 +66,185 @@ func TestEnabledThrottler(t *testing.T) {
mockHealthCheck := NewMockHealthCheck(mockCtrl)
hcCall1 := mockHealthCheck.EXPECT().Subscribe()
hcCall1.Do(func() {})
- hcCall2 := mockHealthCheck.EXPECT().Close()
+ hcCall2 := mockHealthCheck.EXPECT().RegisterStats()
+ hcCall2.Do(func() {})
hcCall2.After(hcCall1)
- healthCheckFactory = func(topoServer *topo.Server, cell string, cellsToWatch []string) discovery.HealthCheck {
- return mockHealthCheck
+ hcCall3 := mockHealthCheck.EXPECT().Close()
+ hcCall3.After(hcCall2)
+ healthCheckFactory = func(ctx context.Context, topoServer *topo.Server, cell, keyspace, shard string, cellsToWatch []string) (discovery.HealthCheck, error) {
+ return mockHealthCheck, nil
}
- topologyWatcherFactory = func(topoServer *topo.Server, hc discovery.HealthCheck, cell, keyspace, shard string, refreshInterval time.Duration, topoReadConcurrency int) TopologyWatcherInterface {
- if ts != topoServer {
- t.Errorf("want: %v, got: %v", ts, topoServer)
- }
- if cell != "cell1" && cell != "cell2" {
- t.Errorf("want: cell1 or cell2, got: %v", cell)
- }
- if keyspace != "keyspace" {
- t.Errorf("want: keyspace, got: %v", keyspace)
- }
- if shard != "shard" {
- t.Errorf("want: shard, got: %v", shard)
- }
- result := NewMockTopologyWatcherInterface(mockCtrl)
- result.EXPECT().Stop()
- return result
- }
-
- mockThrottler := NewMockThrottlerInterface(mockCtrl)
- throttlerFactory = func(name, unit string, threadCount int, maxRate, maxReplicationLag int64) (ThrottlerInterface, error) {
- if threadCount != 1 {
- t.Errorf("want: 1, got: %v", threadCount)
- }
+ mockThrottler := NewMockThrottler(mockCtrl)
+ throttlerFactory = func(name, unit string, threadCount int, maxRate int64, maxReplicationLagConfig throttler.MaxReplicationLagModuleConfig) (throttler.Throttler, error) {
+ assert.Equal(t, 1, threadCount)
return mockThrottler, nil
}
- call0 := mockThrottler.EXPECT().UpdateConfiguration(gomock.Any(), true /* copyZeroValues */)
- call1 := mockThrottler.EXPECT().Throttle(0)
- call1.Return(0 * time.Second)
+ var calls []*gomock.Call
+
+ call := mockThrottler.EXPECT().UpdateConfiguration(gomock.Any(), true /* copyZeroValues */)
+ calls = append(calls, call)
+
+ // 1
+ call = mockThrottler.EXPECT().Throttle(0)
+ call.Return(0 * time.Second)
+ calls = append(calls, call)
+
tabletStats := &discovery.TabletHealth{
Target: &querypb.Target{
+ Cell: "cell1",
TabletType: topodatapb.TabletType_REPLICA,
},
}
- call2 := mockThrottler.EXPECT().RecordReplicationLag(gomock.Any(), tabletStats)
- call3 := mockThrottler.EXPECT().Throttle(0)
- call3.Return(1 * time.Second)
- call4 := mockThrottler.EXPECT().Close()
- call1.After(call0)
- call2.After(call1)
- call3.After(call2)
- call4.After(call3)
+
+ call = mockThrottler.EXPECT().RecordReplicationLag(gomock.Any(), tabletStats)
+ calls = append(calls, call)
+
+ // 2
+ call = mockThrottler.EXPECT().Throttle(0)
+ call.Return(1 * time.Second)
+ calls = append(calls, call)
+
+ // 3
+ // Nothing gets mocked here because the order of evaluation in txThrottler.Throttle() evaluates first
+ // whether the priority allows for throttling or not, so no need to mock calls in mockThrottler.Throttle()
+
+ // 4
+ // Nothing gets mocked here because the order of evaluation in txThrottlerStateImpl.Throttle() evaluates first
+ // whether there is lag or not, so no call to the underlying mockThrottler is issued.
+
+ call = mockThrottler.EXPECT().Close()
+ calls = append(calls, call)
+
+ for i := 1; i < len(calls); i++ {
+ calls[i].After(calls[i-1])
+ }
config := tabletenv.NewDefaultConfig()
config.EnableTxThrottler = true
- config.TxThrottlerHealthCheckCells = []string{"cell1", "cell2"}
+ config.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA}
- throttler, err := tryCreateTxThrottler(config, ts)
- if err != nil {
- t.Fatalf("want: nil, got: %v", err)
- }
+ env := tabletenv.NewEnv(config, t.Name())
+ throttler := NewTxThrottler(env, ts)
+ throttlerImpl, _ := throttler.(*txThrottler)
+ assert.NotNil(t, throttlerImpl)
throttler.InitDBConfig(&querypb.Target{
+ Cell: "cell1",
Keyspace: "keyspace",
Shard: "shard",
})
- if err := throttler.Open(); err != nil {
- t.Fatalf("want: nil, got: %v", err)
- }
- if result := throttler.Throttle(); result != false {
- t.Errorf("want: false, got: %v", result)
- }
- throttler.state.StatsUpdate(tabletStats)
+
+ assert.Nil(t, throttlerImpl.Open())
+ throttlerStateImpl, ok := throttlerImpl.state.(*txThrottlerStateImpl)
+ assert.True(t, ok)
+ assert.Equal(t, map[topodatapb.TabletType]bool{topodatapb.TabletType_REPLICA: true}, throttlerStateImpl.tabletTypes)
+ assert.Equal(t, int64(1), throttlerImpl.throttlerRunning.Get())
+
+ // Stop the go routine that keeps updating the cached shard's max lag to prevent it from changing the value in a
+ // way that will interfere with how we manipulate that value in our tests to evaluate different cases:
+ throttlerStateImpl.done <- true
+
+ // 1 should not throttle due to return value of underlying Throttle(), despite high lag
+ atomic.StoreInt64(&throttlerStateImpl.maxLag, 20)
+ assert.False(t, throttlerImpl.Throttle(100, "some-workload"))
+ assert.Equal(t, int64(1), throttlerImpl.requestsTotal.Counts()["some-workload"])
+ assert.Zero(t, throttlerImpl.requestsThrottled.Counts()["some-workload"])
+
+	throttlerImpl.state.StatsUpdate(tabletStats) // Records the replica's replication lag via mockThrottler.RecordReplicationLag()
+ assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksReadTotal.Counts())
+ assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksRecordedTotal.Counts())
rdonlyTabletStats := &discovery.TabletHealth{
Target: &querypb.Target{
+ Cell: "cell2",
TabletType: topodatapb.TabletType_RDONLY,
},
}
- // This call should not be forwarded to the go/vt/throttler.Throttler object.
- throttler.state.StatsUpdate(rdonlyTabletStats)
- // The second throttle call should reject.
- if result := throttler.Throttle(); result != true {
- t.Errorf("want: true, got: %v", result)
- }
+	// This call should not be forwarded to the go/vt/throttler.Throttler object.
+ throttlerImpl.state.StatsUpdate(rdonlyTabletStats)
+ assert.Equal(t, map[string]int64{"cell1.REPLICA": 1, "cell2.RDONLY": 1}, throttlerImpl.healthChecksReadTotal.Counts())
+ assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksRecordedTotal.Counts())
+
+ // 2 should throttle due to return value of underlying Throttle(), high lag & priority = 100
+ assert.True(t, throttlerImpl.Throttle(100, "some-workload"))
+ assert.Equal(t, int64(2), throttlerImpl.requestsTotal.Counts()["some-workload"])
+ assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some-workload"])
+
+ // 3 should not throttle despite return value of underlying Throttle() and high lag, due to priority = 0
+ assert.False(t, throttlerImpl.Throttle(0, "some-workload"))
+ assert.Equal(t, int64(3), throttlerImpl.requestsTotal.Counts()["some-workload"])
+ assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some-workload"])
+
+ // 4 should not throttle despite return value of underlying Throttle() and priority = 100, due to low lag
+ atomic.StoreInt64(&throttlerStateImpl.maxLag, 1)
+ assert.False(t, throttler.Throttle(100, "some-workload"))
+ assert.Equal(t, int64(4), throttlerImpl.requestsTotal.Counts()["some-workload"])
+ assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some-workload"])
+
throttler.Close()
+ assert.Zero(t, throttlerImpl.throttlerRunning.Get())
+}
+
+func TestFetchKnownCells(t *testing.T) {
+ {
+ ts := memorytopo.NewServer("cell1", "cell2")
+ cells := fetchKnownCells(context.Background(), ts, &querypb.Target{Cell: "cell1"})
+ assert.Equal(t, []string{"cell1", "cell2"}, cells)
+ }
+ {
+ ts := memorytopo.NewServer()
+ cells := fetchKnownCells(context.Background(), ts, &querypb.Target{Cell: "cell1"})
+ assert.Equal(t, []string{"cell1"}, cells)
+ }
+}
+
+func TestDryRunThrottler(t *testing.T) {
+ config := tabletenv.NewDefaultConfig()
+ env := tabletenv.NewEnv(config, t.Name())
+
+ testCases := []struct {
+ Name string
+ txThrottlerStateShouldThrottle bool
+ throttlerDryRun bool
+ expectedResult bool
+ }{
+ {Name: "Real run throttles when txThrottlerStateImpl says it should", txThrottlerStateShouldThrottle: true, throttlerDryRun: false, expectedResult: true},
+ {Name: "Real run does not throttle when txThrottlerStateImpl says it should not", txThrottlerStateShouldThrottle: false, throttlerDryRun: false, expectedResult: false},
+ {Name: "Dry run does not throttle when txThrottlerStateImpl says it should", txThrottlerStateShouldThrottle: true, throttlerDryRun: true, expectedResult: false},
+ {Name: "Dry run does not throttle when txThrottlerStateImpl says it should not", txThrottlerStateShouldThrottle: false, throttlerDryRun: true, expectedResult: false},
+ }
+
+ for _, aTestCase := range testCases {
+ theTestCase := aTestCase
+
+ t.Run(theTestCase.Name, func(t *testing.T) {
+ aTxThrottler := &txThrottler{
+ config: &tabletenv.TabletConfig{
+ EnableTxThrottler: true,
+ TxThrottlerDryRun: theTestCase.throttlerDryRun,
+ },
+ state: &mockTxThrottlerState{shouldThrottle: theTestCase.txThrottlerStateShouldThrottle},
+ throttlerRunning: env.Exporter().NewGauge("TransactionThrottlerRunning", "transaction throttler running state"),
+ requestsTotal: env.Exporter().NewCountersWithSingleLabel("TransactionThrottlerRequests", "transaction throttler requests", "workload"),
+ requestsThrottled: env.Exporter().NewCountersWithSingleLabel("TransactionThrottlerThrottled", "transaction throttler requests throttled", "workload"),
+ }
+
+ assert.Equal(t, theTestCase.expectedResult, aTxThrottler.Throttle(100, "some-workload"))
+ })
+ }
+}
+
+type mockTxThrottlerState struct {
+ shouldThrottle bool
+}
+
+func (t *mockTxThrottlerState) deallocateResources() {
+
+}
+func (t *mockTxThrottlerState) StatsUpdate(tabletStats *discovery.TabletHealth) {
+
+}
+
+func (t *mockTxThrottlerState) throttle() bool {
+ return t.shouldThrottle
}
diff --git a/go/vt/vttablet/tabletserver/vstreamer/copy.go b/go/vt/vttablet/tabletserver/vstreamer/copy.go
index 0065555047d..864dbd5d50c 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/copy.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/copy.go
@@ -255,14 +255,28 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error {
log.Infof("sendFieldEvent returned error %v", err)
return err
}
+ // sendFieldEvent() sends a BEGIN event first.
+ uvs.inTransaction = true
}
+
if len(rows.Rows) == 0 {
log.V(2).Infof("0 rows returned for table %s", tableName)
return nil
}
+ // We are about to send ROW events, so we need to ensure
+ // that we do so within a transaction. The COMMIT event
+ // will be sent in sendEventsForRows() below.
+ if !uvs.inTransaction {
+ evs := []*binlogdatapb.VEvent{{
+ Type: binlogdatapb.VEventType_BEGIN,
+ }}
+ uvs.send(evs)
+ uvs.inTransaction = true
+ }
+
newLastPK = sqltypes.CustomProto3ToResult(uvs.pkfields, &querypb.QueryResult{
- Fields: rows.Fields,
+ Fields: uvs.pkfields,
Rows: []*querypb.Row{rows.Lastpk},
})
qrLastPK := sqltypes.ResultToProto3(newLastPK)
@@ -271,6 +285,8 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error {
log.Infof("sendEventsForRows returned error %v", err)
return err
}
+ // sendEventsForRows() sends a COMMIT event last.
+ uvs.inTransaction = false
uvs.setCopyState(tableName, qrLastPK)
log.V(2).Infof("NewLastPK: %v", qrLastPK)
diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go
index 0dad013e307..1cde5626f7d 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/engine.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go
@@ -88,6 +88,7 @@ type Engine struct {
// vstreamer metrics
vstreamerPhaseTimings *servenv.TimingsWrapper
+ vstreamerCount *stats.Gauge
vstreamerEventsStreamed *stats.Counter
vstreamerPacketSize *stats.GaugeFunc
vstreamerNumPackets *stats.Counter
@@ -125,6 +126,7 @@ func NewEngine(env tabletenv.Env, ts srvtopo.Server, se *schema.Engine, lagThrot
vschemaUpdates: env.Exporter().NewCounter("VSchemaUpdates", "Count of VSchema updates. Does not include errors"),
vstreamerPhaseTimings: env.Exporter().NewTimings("VStreamerPhaseTiming", "Time taken for different phases during vstream copy", "phase-timing"),
+ vstreamerCount: env.Exporter().NewGauge("VStreamerCount", "Current number of vstreamers"),
vstreamerEventsStreamed: env.Exporter().NewCounter("VStreamerEventsStreamed", "Count of events streamed in VStream API"),
vstreamerPacketSize: env.Exporter().NewGaugeFunc("VStreamPacketSize", "Max packet size for sending vstreamer events", getPacketSize),
vstreamerNumPackets: env.Exporter().NewCounter("VStreamerNumPackets", "Number of packets in vstreamer"),
diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
index d45dceda1b5..0378d04c373 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go
@@ -698,7 +698,26 @@ func (plan *Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp
FixedValue: sqltypes.NewInt64(num),
}, nil
case *sqlparser.ConvertUsingExpr:
- colnum, err := findColumn(plan.Table, aliased.As)
+ // Here we find the actual column name in the convert, in case
+ // this is a column rename and the AS is the new column.
+ // For example, in convert(c1 using utf8mb4) as c2, we want to find
+ // c1, because c1 exists in the current table whereas c2 is the renamed column
+ // in the desired table.
+ var colName sqlparser.IdentifierCI
+ err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ switch node := node.(type) {
+ case *sqlparser.ColName:
+ if !node.Qualifier.IsEmpty() {
+ return false, fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(node))
+ }
+ colName = node.Name
+ }
+ return true, nil
+ }, aliased.Expr)
+ if err != nil {
+ return ColExpr{}, fmt.Errorf("failed to find column name for convert using expression: %v, %v", sqlparser.String(aliased.Expr), err)
+ }
+ colnum, err := findColumn(plan.Table, colName)
if err != nil {
return ColExpr{}, err
}
diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go
index 4ec57b15b7d..d1caac6370c 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go
@@ -427,6 +427,25 @@ func TestPlanBuilder(t *testing.T) {
KeyRange: nil,
}},
},
+ }, {
+ inTable: t1,
+ inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select convert(val using utf8mb4) as val2, id as id from t1"},
+ outPlan: &Plan{
+ ColExprs: []ColExpr{{
+ ColNum: 1,
+ Field: &querypb.Field{
+ Name: "val",
+ Type: sqltypes.VarBinary,
+ },
+ }, {
+ ColNum: 0,
+ Field: &querypb.Field{
+ Name: "id",
+ Type: sqltypes.Int64,
+ },
+ }},
+ convertUsingUTF8Columns: map[string]bool{"val": true},
+ },
}, {
inTable: regional,
inRule: &binlogdatapb.Rule{Match: "regional", Filter: "select id, keyspace_id() from regional"},
diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go
index 67be0513e67..560546d7c04 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go
@@ -153,9 +153,14 @@ func (rs *rowStreamer) buildPlan() error {
return err
}
ti := &Table{
- Name: st.Name,
- Fields: st.Fields,
+ Name: st.Name,
}
+
+ ti.Fields, err = getFields(rs.ctx, rs.cp, st.Name, rs.cp.DBName(), st.Fields)
+ if err != nil {
+ return err
+ }
+
// The plan we build is identical to the one for vstreamer.
// This is because the row format of a read is identical
// to the row format of a binlog event. So, the same
diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go
index 2a9ac5a47ff..4115a006c37 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go
@@ -72,7 +72,7 @@ func TestStreamRowsScan(t *testing.T) {
// t1: simulates rollup, with non-pk column
wantStream = []string{
- `fields:{name:"1" type:INT64} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"1" type:INT64} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"1bbb"} lastpk:{lengths:1 values:"2"}`,
}
wantQuery = "select id, val from t1 order by id"
@@ -80,7 +80,7 @@ func TestStreamRowsScan(t *testing.T) {
// t1: simulates rollup, with pk and non-pk column
wantStream = []string{
- `fields:{name:"1" type:INT64} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"1" type:INT64} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:1 lengths:1 lengths:3 values:"11aaa"} rows:{lengths:1 lengths:1 lengths:3 values:"12bbb"} lastpk:{lengths:1 values:"2"}`,
}
wantQuery = "select id, val from t1 order by id"
@@ -88,7 +88,7 @@ func TestStreamRowsScan(t *testing.T) {
// t1: no pk in select list
wantStream = []string{
- `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:3 values:"aaa"} rows:{lengths:3 values:"bbb"} lastpk:{lengths:1 values:"2"}`,
}
wantQuery = "select id, val from t1 order by id"
@@ -96,7 +96,7 @@ func TestStreamRowsScan(t *testing.T) {
// t1: all rows
wantStream = []string{
- `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 values:"2"}`,
}
wantQuery = "select id, val from t1 order by id"
@@ -104,7 +104,7 @@ func TestStreamRowsScan(t *testing.T) {
// t1: lastpk=1
wantStream = []string{
- `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 values:"2"}`,
}
wantQuery = "select id, val from t1 where (id > 1) order by id"
@@ -112,7 +112,7 @@ func TestStreamRowsScan(t *testing.T) {
// t1: different column ordering
wantStream = []string{
- `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:3 lengths:1 values:"aaa1"} rows:{lengths:3 lengths:1 values:"bbb2"} lastpk:{lengths:1 values:"2"}`,
}
wantQuery = "select id, val from t1 order by id"
@@ -120,7 +120,7 @@ func TestStreamRowsScan(t *testing.T) {
// t2: all rows
wantStream = []string{
- `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32}`,
`rows:{lengths:1 lengths:1 lengths:3 values:"12aaa"} rows:{lengths:1 lengths:1 lengths:3 values:"13bbb"} lastpk:{lengths:1 lengths:1 values:"13"}`,
}
wantQuery = "select id1, id2, val from t2 order by id1, id2"
@@ -128,7 +128,7 @@ func TestStreamRowsScan(t *testing.T) {
// t2: lastpk=1,2
wantStream = []string{
- `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32}`,
`rows:{lengths:1 lengths:1 lengths:3 values:"13bbb"} lastpk:{lengths:1 lengths:1 values:"13"}`,
}
wantQuery = "select id1, id2, val from t2 where (id1 = 1 and id2 > 2) or (id1 > 1) order by id1, id2"
@@ -136,7 +136,7 @@ func TestStreamRowsScan(t *testing.T) {
// t3: all rows
wantStream = []string{
- `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32} pkfields:{name:"val" type:VARBINARY}`,
+ `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32} pkfields:{name:"val" type:VARBINARY}`,
`rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 lengths:3 values:"2bbb"}`,
}
wantQuery = "select id, val from t3 order by id, val"
@@ -144,7 +144,7 @@ func TestStreamRowsScan(t *testing.T) {
// t3: lastpk: 1,'aaa'
wantStream = []string{
- `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32} pkfields:{name:"val" type:VARBINARY}`,
+ `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32} pkfields:{name:"val" type:VARBINARY}`,
`rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 lengths:3 values:"2bbb"}`,
}
wantQuery = "select id, val from t3 where (id = 1 and val > 'aaa') or (id > 1) order by id, val"
@@ -152,7 +152,7 @@ func TestStreamRowsScan(t *testing.T) {
// t4: all rows
wantStream = []string{
- `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32} pkfields:{name:"id3" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32} pkfields:{name:"id3" type:INT32}`,
`rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:"123aaa"} rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:"234bbb"} lastpk:{lengths:1 lengths:1 lengths:1 values:"234"}`,
}
wantQuery = "select id1, id2, id3, val from t4 order by id1, id2, id3"
@@ -160,7 +160,7 @@ func TestStreamRowsScan(t *testing.T) {
// t4: lastpk: 1,2,3
wantStream = []string{
- `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32} pkfields:{name:"id3" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32} pkfields:{name:"id2" type:INT32} pkfields:{name:"id3" type:INT32}`,
`rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:"234bbb"} lastpk:{lengths:1 lengths:1 lengths:1 values:"234"}`,
}
wantQuery = "select id1, id2, id3, val from t4 where (id1 = 1 and id2 = 2 and id3 > 3) or (id1 = 1 and id2 > 2) or (id1 > 1) order by id1, id2, id3"
@@ -255,7 +255,7 @@ func TestStreamRowsKeyRange(t *testing.T) {
// Only the first row should be returned, but lastpk should be 6.
wantStream := []string{
- `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32}`,
`rows:{lengths:1 lengths:3 values:"1aaa"} lastpk:{lengths:1 values:"6"}`,
}
wantQuery := "select id1, val from t1 order by id1"
@@ -287,7 +287,7 @@ func TestStreamRowsFilterInt(t *testing.T) {
time.Sleep(1 * time.Second)
wantStream := []string{
- `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32}`,
`rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"4ddd"} lastpk:{lengths:1 values:"5"}`,
}
wantQuery := "select id1, id2, val from t1 order by id1"
@@ -320,7 +320,7 @@ func TestStreamRowsFilterVarBinary(t *testing.T) {
time.Sleep(1 * time.Second)
wantStream := []string{
- `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32}`,
+ `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32}`,
`rows:{lengths:1 lengths:6 values:"2newton"} rows:{lengths:1 lengths:6 values:"3newton"} rows:{lengths:1 lengths:6 values:"5newton"} lastpk:{lengths:1 values:"6"}`,
}
wantQuery := "select id1, val from t1 order by id1"
@@ -346,7 +346,7 @@ func TestStreamRowsMultiPacket(t *testing.T) {
engine.se.Reload(context.Background())
wantStream := []string{
- `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32}`,
+ `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32}`,
`rows:{lengths:1 lengths:3 values:"1234"} rows:{lengths:1 lengths:4 values:"26789"} rows:{lengths:1 lengths:1 values:"31"} lastpk:{lengths:1 values:"3"}`,
`rows:{lengths:1 lengths:10 values:"42345678901"} lastpk:{lengths:1 values:"4"}`,
`rows:{lengths:1 lengths:1 values:"52"} lastpk:{lengths:1 values:"5"}`,
@@ -415,7 +415,9 @@ func checkStream(t *testing.T, query string, lastpk []sqltypes.Value, wantQuery
re, _ := regexp.Compile(` flags:[\d]+`)
srows = re.ReplaceAllString(srows, "")
- if srows != wantStream[i] {
+ want := env.RemoveAnyDeprecatedDisplayWidths(wantStream[i])
+
+ if srows != want {
ch <- fmt.Errorf("stream %d:\n%s, want\n%s", i, srows, wantStream[i])
}
i++
diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
index 40bf27dd0cf..34f13268a06 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
@@ -51,13 +51,17 @@ type uvstreamer struct {
cancel func()
// input parameters
- vse *Engine
- send func([]*binlogdatapb.VEvent) error
- cp dbconfigs.Connector
- se *schema.Engine
- startPos string
- filter *binlogdatapb.Filter
- inTablePKs []*binlogdatapb.TableLastPK
+ vse *Engine
+ send func([]*binlogdatapb.VEvent) error
+ cp dbconfigs.Connector
+ se *schema.Engine
+ startPos string
+ // Are we currently in an explicit transaction?
+ // If we are not, and we're about to send ROW
+ // events, then we need to send a BEGIN event first.
+ inTransaction bool
+ filter *binlogdatapb.Filter
+ inTablePKs []*binlogdatapb.TableLastPK
vschema *localVSchema
@@ -218,7 +222,8 @@ func getQuery(tableName string, filter string) string {
query = buf.String()
case key.IsKeyRange(filter):
buf := sqlparser.NewTrackedBuffer(nil)
- buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewIdentifierCS(tableName), sqlparser.NewStrLiteral(filter))
+ buf.Myprintf("select * from %v where in_keyrange(%v)",
+ sqlparser.NewIdentifierCS(tableName), sqlparser.NewStrLiteral(filter))
query = buf.String()
}
return query
@@ -229,7 +234,40 @@ func (uvs *uvstreamer) Cancel() {
uvs.cancel()
}
-// during copy phase only send streaming events (during catchup/fastforward) for pks already seen
+// We have not yet implemented the logic to check if an event is for a row that is already copied,
+// so we always return true so that we send all events for this table and so we don't miss events.
+func (uvs *uvstreamer) isRowCopied(tableName string, ev *binlogdatapb.VEvent) bool {
+ return true
+}
+
+// Only send catchup/fastforward events for tables whose copy phase is complete or in progress.
+// This ensures we fulfill the at-least-once delivery semantics for events.
+// TODO: filter out events for rows not yet copied. Note that we can only do this as a best-effort
+// for comparable PKs.
+func (uvs *uvstreamer) shouldSendEventForTable(tableName string, ev *binlogdatapb.VEvent) bool {
+ table, ok := uvs.plans[tableName]
+ // Event is for a table which is not in its copy phase.
+ if !ok {
+ return true
+ }
+
+ // If the table copy was not started and no tablePK was specified, we can ignore catchup/fastforward events for it.
+ if table.tablePK == nil || table.tablePK.Lastpk == nil {
+ return false
+ }
+
+ // Table is currently in its copy phase. We have not yet implemented the logic to
+ // check if an event is for a row that is already copied, so we always return true
+ // here so that we don't miss events.
+ // We may send duplicate insert events or update/delete events for rows not yet seen
+ // to the client for the table being copied. This is ok as the client is expected to be
+ // idempotent: we only promise at-least-once semantics for VStream API (not exactly-once).
+ // Aside: vreplication workflows handle at-least-once by adding where clauses that render
+ // DML queries, related to events for rows not yet copied, as no-ops.
+ return uvs.isRowCopied(tableName, ev)
+}
+
+// Do not send internal heartbeat events. Filter out events for tables whose copy has not been started.
func (uvs *uvstreamer) filterEvents(evs []*binlogdatapb.VEvent) []*binlogdatapb.VEvent {
if len(uvs.plans) == 0 {
return evs
@@ -239,25 +277,21 @@ func (uvs *uvstreamer) filterEvents(evs []*binlogdatapb.VEvent) []*binlogdatapb.
var shouldSend bool
for _, ev := range evs {
- shouldSend = false
- tableName = ""
switch ev.Type {
case binlogdatapb.VEventType_ROW:
tableName = ev.RowEvent.TableName
case binlogdatapb.VEventType_FIELD:
tableName = ev.FieldEvent.TableName
+ default:
+ tableName = ""
+ }
+ switch ev.Type {
case binlogdatapb.VEventType_HEARTBEAT:
shouldSend = false
default:
- shouldSend = true
- }
- if !shouldSend && tableName != "" {
- shouldSend = true
- _, ok := uvs.plans[tableName]
- if ok {
- shouldSend = false
- }
+ shouldSend = uvs.shouldSendEventForTable(tableName, ev)
}
+
if shouldSend {
evs2 = append(evs2, ev)
}
@@ -331,7 +365,9 @@ func (uvs *uvstreamer) setStreamStartPosition() error {
}
if !curPos.AtLeast(pos) {
uvs.vse.errorCounts.Add("GTIDSet Mismatch", 1)
- return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "GTIDSet Mismatch: requested source position:%v, current target vrep position: %v", mysql.EncodePosition(pos), mysql.EncodePosition(curPos))
+ return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT,
+ "GTIDSet Mismatch: requested source position:%v, current target vrep position: %v",
+ mysql.EncodePosition(pos), mysql.EncodePosition(curPos))
}
uvs.pos = pos
return nil
@@ -346,17 +382,22 @@ func (uvs *uvstreamer) currentPosition() (mysql.Position, error) {
return conn.PrimaryPosition()
}
+// Possible states:
+// 1. TablePKs nil, startPos set to gtid or "current" => start replicating from pos
+// 2. TablePKs nil, startPos empty => full table copy of tables matching filter
+// 3. TablePKs not nil, startPos empty => table copy (for pks > lastPK)
+// 4. TablePKs not nil, startPos set => run catchup from startPos, then table copy (for pks > lastPK)
func (uvs *uvstreamer) init() error {
- if uvs.startPos != "" {
- if err := uvs.setStreamStartPosition(); err != nil {
+ if uvs.startPos == "" /* full copy */ || len(uvs.inTablePKs) > 0 /* resume copy */ {
+ if err := uvs.buildTablePlan(); err != nil {
return err
}
- } else if uvs.startPos == "" || len(uvs.inTablePKs) > 0 {
- if err := uvs.buildTablePlan(); err != nil {
+ }
+ if uvs.startPos != "" {
+ if err := uvs.setStreamStartPosition(); err != nil {
return err
}
}
-
if uvs.pos.IsZero() && (len(uvs.plans) == 0) {
return fmt.Errorf("stream needs a position or a table to copy")
}
@@ -376,9 +417,12 @@ func (uvs *uvstreamer) Stream() error {
uvs.vse.errorCounts.Add("Copy", 1)
return err
}
- uvs.sendTestEvent("Copy Done")
+ if err := uvs.allCopyComplete(); err != nil {
+ return err
+ }
}
- vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, mysql.EncodePosition(uvs.pos), mysql.EncodePosition(uvs.stopPos), uvs.filter, uvs.getVSchema(), uvs.send, "replicate", uvs.vse)
+ vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, mysql.EncodePosition(uvs.pos), mysql.EncodePosition(uvs.stopPos),
+ uvs.filter, uvs.getVSchema(), uvs.send, "replicate", uvs.vse)
uvs.setVs(vs)
return vs.Stream()
@@ -418,6 +462,17 @@ func (uvs *uvstreamer) setCopyState(tableName string, qr *querypb.QueryResult) {
uvs.plans[tableName].tablePK.Lastpk = qr
}
+func (uvs *uvstreamer) allCopyComplete() error {
+ ev := &binlogdatapb.VEvent{
+ Type: binlogdatapb.VEventType_COPY_COMPLETED,
+ }
+
+ if err := uvs.send([]*binlogdatapb.VEvent{ev}); err != nil {
+ return err
+ }
+ return nil
+}
+
// dummy event sent only in test mode
func (uvs *uvstreamer) sendTestEvent(msg string) {
if !uvstreamerTestMode {
diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go
index fdd60b8207f..8ca43f008b6 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go
@@ -182,6 +182,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) {
uvstreamerTestMode = true
defer func() { uvstreamerTestMode = false }()
initialize(t)
+
if err := engine.se.Reload(context.Background()); err != nil {
t.Fatal("Error reloading schema")
}
@@ -190,6 +191,12 @@ func TestVStreamCopyCompleteFlow(t *testing.T) {
var tablePKs []*binlogdatapb.TableLastPK
for i, table := range testState.tables {
rules = append(rules, getRule(table))
+
+ // For table t2, let tablePK be nil, so that we don't send events for the insert in initTables().
+ if table == "t2" {
+ continue
+ }
+
tablePKs = append(tablePKs, getTablePK(table, i+1))
}
filter := &binlogdatapb.Filter{
@@ -226,7 +233,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) {
}
- callbacks["OTHER.*Copy Done"] = func() {
+ callbacks["COPY_COMPLETED"] = func() {
log.Info("Copy done, inserting events to stream")
insertRow(t, "t1", 1, numInitialRows+4)
insertRow(t, "t2", 2, numInitialRows+3)
@@ -245,8 +252,8 @@ commit;"
}
numCopyEvents := 3 /*t1,t2,t3*/ * (numInitialRows + 1 /*FieldEvent*/ + 1 /*LastPKEvent*/ + 1 /*TestEvent: Copy Start*/ + 2 /*begin,commit*/ + 3 /* LastPK Completed*/)
- numCopyEvents += 2 /* GTID + Test event after all copy is done */
- numCatchupEvents := 3 * 5 /*2 t1, 1 t2 : BEGIN+FIELD+ROW+GTID+COMMIT*/
+ numCopyEvents += 2 /* GTID + Event after all copy is done */
+ numCatchupEvents := 3 * 5 /* 2 t1, 1 t2 : BEGIN+FIELD+ROW+GTID+COMMIT */
numFastForwardEvents := 5 /*t1:FIELD+ROW*/
numMisc := 1 /* t2 insert during t1 catchup that comes in t2 copy */
numReplicateEvents := 2*5 /* insert into t1/t2 */ + 6 /* begin/field/2 inserts/gtid/commit */
@@ -470,7 +477,7 @@ var expectedEvents = []string{
"type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:2 values:\"880\"}}}",
"type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:2 values:\"990\"}}}",
"type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:2 lengths:3 values:\"10100\"}}}",
- "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\" lastpk:{rows:{lengths:2 values:\"10\"}}}}",
+ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\" lastpk:{fields:{name:\"id11\" type:INT32} rows:{lengths:2 values:\"10\"}}}}",
"type:COMMIT",
"type:BEGIN",
"type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\"} completed:true}",
@@ -499,7 +506,7 @@ var expectedEvents = []string{
"type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:1 lengths:3 values:\"9180\"}}}",
"type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:2 lengths:3 values:\"10200\"}}}",
"type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:2 lengths:3 values:\"11220\"}}}",
- "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2\" lastpk:{rows:{lengths:2 values:\"11\"}}}}",
+ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2\" lastpk:{fields:{name:\"id21\" type:INT32} rows:{lengths:2 values:\"11\"}}}}",
"type:COMMIT",
"type:BEGIN",
"type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2\"} completed:true}",
@@ -527,12 +534,12 @@ var expectedEvents = []string{
"type:ROW row_event:{table_name:\"t3\" row_changes:{after:{lengths:1 lengths:3 values:\"8240\"}}}",
"type:ROW row_event:{table_name:\"t3\" row_changes:{after:{lengths:1 lengths:3 values:\"9270\"}}}",
"type:ROW row_event:{table_name:\"t3\" row_changes:{after:{lengths:2 lengths:3 values:\"10300\"}}}",
- "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t3\" lastpk:{rows:{lengths:2 values:\"10\"}}}}",
+ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t3\" lastpk:{fields:{name:\"id31\" type:INT32} rows:{lengths:2 values:\"10\"}}}}",
"type:COMMIT",
"type:BEGIN",
"type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t3\"} completed:true}",
"type:COMMIT",
- "type:OTHER gtid:\"Copy Done\"",
+ "type:COPY_COMPLETED",
"type:BEGIN",
"type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}",
"type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:2 lengths:3 values:\"14140\"}}}",
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
index da61163a6ca..2ca077401c0 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
@@ -24,6 +24,7 @@ import (
"time"
"google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/timer"
@@ -163,7 +164,11 @@ func (vs *vstreamer) Cancel() {
func (vs *vstreamer) Stream() error {
//defer vs.cancel()
ctx := context.Background()
- defer ctx.Done()
+ vs.vse.vstreamerCount.Add(1)
+ defer func() {
+ ctx.Done()
+ vs.vse.vstreamerCount.Add(-1)
+ }()
vs.vse.vstreamersCreated.Add(1)
log.Infof("Starting Stream() with startPos %s", vs.startPos)
pos, err := mysql.DecodePosition(vs.startPos)
@@ -778,39 +783,16 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er
}
// Columns should be truncated to match those in tm.
- fields = st.Fields[:len(tm.Types)]
- extColInfos, err := vs.getExtColInfos(tm.Name, tm.Database)
+ fieldsCopy, err := getFields(vs.ctx, vs.cp, tm.Name, tm.Database, st.Fields[:len(tm.Types)])
if err != nil {
return nil, err
}
- for _, field := range fields {
- // we want the MySQL column type info so that we can properly handle
- // ambiguous binlog events and other cases where the internal types
- // don't match the MySQL column type. One example being that in binlog
- // events CHAR columns with a binary collation are indistinguishable
- // from BINARY columns.
- if extColInfo, ok := extColInfos[field.Name]; ok {
- field.ColumnType = extColInfo.columnType
- }
- }
- return fields, nil
-}
-
-// additional column attributes from information_schema.columns. Currently only column_type is used, but
-// we expect to add more in the future
-type extColInfo struct {
- columnType string
-}
-
-func encodeString(in string) string {
- buf := bytes.NewBuffer(nil)
- sqltypes.NewVarChar(in).EncodeSQL(buf)
- return buf.String()
+ return fieldsCopy, nil
}
-func (vs *vstreamer) getExtColInfos(table, database string) (map[string]*extColInfo, error) {
+func getExtColInfos(ctx context.Context, cp dbconfigs.Connector, table, database string) (map[string]*extColInfo, error) {
extColInfos := make(map[string]*extColInfo)
- conn, err := vs.cp.Connect(vs.ctx)
+ conn, err := cp.Connect(ctx)
if err != nil {
return nil, err
}
@@ -830,6 +812,37 @@ func (vs *vstreamer) getExtColInfos(table, database string) (map[string]*extColI
return extColInfos, nil
}
+func getFields(ctx context.Context, cp dbconfigs.Connector, table, database string, fields []*querypb.Field) ([]*querypb.Field, error) {
+ // Make a deep copy of the schema.Engine fields as they are pointers and
+ // will be modified by adding ColumnType below
+ fieldsCopy := make([]*querypb.Field, len(fields))
+ for i, field := range fields {
+ fieldsCopy[i] = proto.Clone(field).(*querypb.Field)
+ }
+ extColInfos, err := getExtColInfos(ctx, cp, table, database)
+ if err != nil {
+ return nil, err
+ }
+ for _, field := range fieldsCopy {
+ if colInfo, ok := extColInfos[field.Name]; ok {
+ field.ColumnType = colInfo.columnType
+ }
+ }
+ return fieldsCopy, nil
+}
+
+// additional column attributes from information_schema.columns. Currently only column_type is used, but
+// we expect to add more in the future
+type extColInfo struct {
+ columnType string
+}
+
+func encodeString(in string) string {
+ buf := bytes.NewBuffer(nil)
+ sqltypes.NewVarChar(in).EncodeSQL(buf)
+ return buf.String()
+}
+
func (vs *vstreamer) processJournalEvent(vevents []*binlogdatapb.VEvent, plan *streamerPlan, rows mysql.Rows) ([]*binlogdatapb.VEvent, error) {
// Get DbName
params, err := vs.cp.MysqlParams()
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
index 191ba408f97..5d518558e87 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
@@ -418,8 +418,8 @@ func TestVStreamCopySimpleFlow(t *testing.T) {
tablePKs = append(tablePKs, getTablePK("t1", 1))
tablePKs = append(tablePKs, getTablePK("t2", 2))
- t1FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63}}"}
- t2FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63}}"}
+ t1FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}"}
+ t2FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"}}"}
t1Events := []string{}
t2Events := []string{}
for i := 1; i <= 10; i++ {
@@ -447,7 +447,7 @@ func TestVStreamCopySimpleFlow(t *testing.T) {
testcases := []testcase{
{
input: []string{},
- output: [][]string{t1FieldEvent, {"gtid"}, t1Events, {"begin", "lastpk", "commit"}, t2FieldEvent, t2Events, {"begin", "lastpk", "commit"}},
+ output: [][]string{t1FieldEvent, {"gtid"}, t1Events, {"begin", "lastpk", "commit"}, t2FieldEvent, t2Events, {"begin", "lastpk", "commit"}, {"copy_completed"}},
},
{
@@ -503,27 +503,27 @@ func TestVStreamCopyWithDifferentFilters(t *testing.T) {
var expectedEvents = []string{
"type:BEGIN",
- "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id1\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63} fields:{name:\"id2\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63}}",
+ "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id1\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id2\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}",
"type:GTID",
"type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:1 values:\"12\"}}}",
- "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\" lastpk:{rows:{lengths:1 values:\"1\"}}}}",
+ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\" lastpk:{fields:{name:\"id1\" type:INT32} rows:{lengths:1 values:\"1\"}}}}",
"type:COMMIT",
"type:BEGIN",
"type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\"} completed:true}",
"type:COMMIT",
"type:BEGIN",
- "type:FIELD field_event:{table_name:\"t2a\" fields:{name:\"id1\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63} fields:{name:\"id2\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63}}",
+ "type:FIELD field_event:{table_name:\"t2a\" fields:{name:\"id1\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id2\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}",
"type:ROW row_event:{table_name:\"t2a\" row_changes:{after:{lengths:1 lengths:1 values:\"14\"}}}",
- "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2a\" lastpk:{rows:{lengths:1 values:\"1\"}}}}",
+ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2a\" lastpk:{fields:{name:\"id1\" type:INT32} rows:{lengths:1 values:\"1\"}}}}",
"type:COMMIT",
"type:BEGIN",
"type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2a\"} completed:true}",
"type:COMMIT",
"type:BEGIN",
- "type:FIELD field_event:{table_name:\"t2b\" fields:{name:\"id1\" type:VARCHAR table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id1\" column_length:80 charset:45} fields:{name:\"id2\" type:INT32 table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63}}",
+ "type:FIELD field_event:{table_name:\"t2b\" fields:{name:\"id1\" type:VARCHAR table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id1\" column_length:80 charset:45 column_type:\"varchar(20)\"} fields:{name:\"id2\" type:INT32 table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}",
"type:ROW row_event:{table_name:\"t2b\" row_changes:{after:{lengths:1 lengths:1 values:\"a5\"}}}",
"type:ROW row_event:{table_name:\"t2b\" row_changes:{after:{lengths:1 lengths:1 values:\"b6\"}}}",
- "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2b\" lastpk:{rows:{lengths:1 values:\"b\"}}}}",
+ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2b\" lastpk:{fields:{name:\"id1\" type:VARCHAR} rows:{lengths:1 values:\"b\"}}}}",
"type:COMMIT",
"type:BEGIN",
"type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2b\"} completed:true}",
@@ -566,8 +566,11 @@ func TestVStreamCopyWithDifferentFilters(t *testing.T) {
}
got := ev.String()
want := expectedEvents[i]
+
+ want = env.RemoveAnyDeprecatedDisplayWidths(want)
+
if !strings.HasPrefix(got, want) {
- errGoroutine = fmt.Errorf("Event %d did not match, want %s, got %s", i, want, got)
+ errGoroutine = fmt.Errorf("event %d did not match, want %s, got %s", i, want, got)
return errGoroutine
}
}
@@ -2178,6 +2181,10 @@ func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlog
if evs[i].Type != binlogdatapb.VEventType_DDL {
t.Fatalf("%v (%d): event: %v, want ddl", input, i, evs[i])
}
+ case "copy_completed":
+ if evs[i].Type != binlogdatapb.VEventType_COPY_COMPLETED {
+ t.Fatalf("%v (%d): event: %v, want copy_completed", input, i, evs[i])
+ }
default:
evs[i].Timestamp = 0
if evs[i].Type == binlogdatapb.VEventType_FIELD {
diff --git a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go
index 646c1b71ce8..cfa040b7d43 100644
--- a/go/vt/vttablet/tmclient/rpc_client_api.go
+++ b/go/vt/vttablet/tmclient/rpc_client_api.go
@@ -238,7 +238,7 @@ type TabletManagerClient interface {
Backup(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.BackupRequest) (logutil.EventStream, error)
// RestoreFromBackup deletes local data and restores database from backup
- RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time) (logutil.EventStream, error)
+ RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, backupTime time.Time, allowedBackupEngines []string) (logutil.EventStream, error)
//
// Management methods
diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go
index e3f3e30fa43..98d5fa8aa55 100644
--- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go
+++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go
@@ -592,7 +592,7 @@ func (fra *fakeRPCTM) ExecuteQuery(ctx context.Context, req *tabletmanagerdatapb
}
var testExecuteFetchQuery = []byte("fetch this invalid utf8 character \x80")
-var testExecuteFetchMaxRows = 100
+var testExecuteFetchMaxRows = uint64(100)
var testExecuteFetchResult = &querypb.QueryResult{
Fields: []*querypb.Field{
{
@@ -1190,9 +1190,9 @@ func tmRPCTestReplicaWasRestartedPanic(ctx context.Context, t *testing.T, client
func tmRPCTestStopReplicationAndGetStatus(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
rp, err := client.StopReplicationAndGetStatus(ctx, tablet, replicationdatapb.StopReplicationMode_IOANDSQLTHREAD)
- compareError(t, "StopReplicationAndGetStatus", err, rp, testReplicationStatus)
+ compareError(t, "StopReplicationAndGetStatus", err, rp, &replicationdatapb.StopReplicationStatus{Before: testReplicationStatus, After: testReplicationStatus})
rp, err = client.StopReplicationAndGetStatus(ctx, tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY)
- compareError(t, "StopReplicationAndGetStatus", err, rp, testReplicationStatus)
+ compareError(t, "StopReplicationAndGetStatus", err, rp, &replicationdatapb.StopReplicationStatus{Before: testReplicationStatus, After: testReplicationStatus})
}
func tmRPCTestStopReplicationAndGetStatusPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
@@ -1221,7 +1221,7 @@ func tmRPCTestPromoteReplicaPanic(ctx context.Context, t *testing.T, client tmcl
// Backup / restore related methods
//
-var testBackupConcurrency = 24
+var testBackupConcurrency = int64(24)
var testBackupAllowPrimary = false
var testBackupCalled = false
var testRestoreFromBackupCalled = false
@@ -1237,6 +1237,10 @@ func (fra *fakeRPCTM) Backup(ctx context.Context, logger logutil.Logger, request
return nil
}
+func (fra *fakeRPCTM) IsBackupRunning() bool {
+ return false
+}
+
func tmRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) {
req := &tabletmanagerdatapb.BackupRequest{Concurrency: int64(testBackupConcurrency), AllowPrimary: testBackupAllowPrimary}
stream, err := client.Backup(ctx, tablet, req)
@@ -1260,7 +1264,7 @@ func tmRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient.Tab
expectHandleRPCPanic(t, "Backup", true /*verbose*/, err)
}
-func (fra *fakeRPCTM) RestoreFromBackup(ctx context.Context, logger logutil.Logger, backupTime time.Time) error {
+func (fra *fakeRPCTM) RestoreFromBackup(ctx context.Context, logger logutil.Logger, backupTime time.Time, allowedBackupEngines []string) error {
if fra.panics {
panic(fmt.Errorf("test-triggered panic"))
}
@@ -1269,8 +1273,8 @@ func (fra *fakeRPCTM) RestoreFromBackup(ctx context.Context, logger logutil.Logg
return nil
}
-func tmRPCTestRestoreFromBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, backupTime time.Time) {
- stream, err := client.RestoreFromBackup(ctx, tablet, backupTime)
+func tmRPCTestRestoreFromBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, backupTime time.Time, allowedBackupEngines []string) {
+ stream, err := client.RestoreFromBackup(ctx, tablet, backupTime, allowedBackupEngines)
if err != nil {
t.Fatalf("RestoreFromBackup failed: %v", err)
}
@@ -1278,8 +1282,8 @@ func tmRPCTestRestoreFromBackup(ctx context.Context, t *testing.T, client tmclie
compareError(t, "RestoreFromBackup", err, true, testRestoreFromBackupCalled)
}
-func tmRPCTestRestoreFromBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, backupTime time.Time) {
- stream, err := client.RestoreFromBackup(ctx, tablet, backupTime)
+func tmRPCTestRestoreFromBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, backupTime time.Time, allowedBackupEngines []string) {
+ stream, err := client.RestoreFromBackup(ctx, tablet, backupTime, allowedBackupEngines)
if err != nil {
t.Fatalf("RestoreFromBackup failed: %v", err)
}
@@ -1312,6 +1316,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.T
ctx := context.Background()
backupTime := time.Time{}
+ allowedBackupEngines := []string{}
// Test RPC specific methods of the interface.
tmRPCTestDialExpiredContext(ctx, t, client, tablet)
@@ -1367,7 +1372,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.T
// Backup / restore related methods
tmRPCTestBackup(ctx, t, client, tablet)
- tmRPCTestRestoreFromBackup(ctx, t, client, tablet, backupTime)
+ tmRPCTestRestoreFromBackup(ctx, t, client, tablet, backupTime, allowedBackupEngines)
//
// Tests panic handling everywhere now
@@ -1419,7 +1424,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.T
tmRPCTestReplicaWasRestartedPanic(ctx, t, client, tablet)
// Backup / restore related methods
tmRPCTestBackupPanic(ctx, t, client, tablet)
- tmRPCTestRestoreFromBackupPanic(ctx, t, client, tablet, backupTime)
+ tmRPCTestRestoreFromBackupPanic(ctx, t, client, tablet, backupTime, allowedBackupEngines)
client.Close()
}
diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go
index 40ba6937e27..ba8218963ba 100644
--- a/go/vt/vttest/local_cluster.go
+++ b/go/vt/vttest/local_cluster.go
@@ -151,20 +151,20 @@ type Config struct {
// It then sets the right value for cfg.SchemaDir.
// At the end of the test, the caller should os.RemoveAll(cfg.SchemaDir).
func (cfg *Config) InitSchemas(keyspace, schema string, vschema *vschemapb.Keyspace) error {
- if cfg.SchemaDir != "" {
- return fmt.Errorf("SchemaDir is already set to %v", cfg.SchemaDir)
- }
-
- // Create a base temporary directory.
- tempSchemaDir, err := os.MkdirTemp("", "vttest")
- if err != nil {
- return err
+ schemaDir := cfg.SchemaDir
+ if schemaDir == "" {
+ // Create a base temporary directory.
+ tempSchemaDir, err := os.MkdirTemp("", "vttest")
+ if err != nil {
+ return err
+ }
+ schemaDir = tempSchemaDir
}
// Write the schema if set.
if schema != "" {
- ksDir := path.Join(tempSchemaDir, keyspace)
- err = os.Mkdir(ksDir, os.ModeDir|0775)
+ ksDir := path.Join(schemaDir, keyspace)
+ err := os.Mkdir(ksDir, os.ModeDir|0775)
if err != nil {
return err
}
@@ -177,7 +177,7 @@ func (cfg *Config) InitSchemas(keyspace, schema string, vschema *vschemapb.Keysp
// Write in the vschema if set.
if vschema != nil {
- vschemaFilePath := path.Join(tempSchemaDir, keyspace, "vschema.json")
+ vschemaFilePath := path.Join(schemaDir, keyspace, "vschema.json")
vschemaJSON, err := json.Marshal(vschema)
if err != nil {
return err
@@ -186,7 +186,7 @@ func (cfg *Config) InitSchemas(keyspace, schema string, vschema *vschemapb.Keysp
return err
}
}
- cfg.SchemaDir = tempSchemaDir
+ cfg.SchemaDir = schemaDir
return nil
}
diff --git a/go/vt/vttest/topoctl.go b/go/vt/vttest/topoctl.go
index 2b63900d6d8..1fd4cb6e101 100644
--- a/go/vt/vttest/topoctl.go
+++ b/go/vt/vttest/topoctl.go
@@ -31,6 +31,7 @@ func (ctl *Topoctl) Setup() error {
if err != nil {
return err
}
+ defer topoServer.Close()
log.Infof("Creating cells if they don't exist in the provided topo server %s %s %s", ctl.TopoImplementation, ctl.TopoGlobalServerAddress, ctl.TopoGlobalRoot)
// Create cells if it doesn't exist to be idempotent. Should work when we share the same topo server across multiple local clusters.
diff --git a/go/vt/vttls/crl.go b/go/vt/vttls/crl.go
index 4d2167a7c11..63c97ecfa81 100644
--- a/go/vt/vttls/crl.go
+++ b/go/vt/vttls/crl.go
@@ -18,7 +18,6 @@ package vttls
import (
"crypto/x509"
- "crypto/x509/pkix"
"encoding/pem"
"fmt"
"os"
@@ -29,12 +28,12 @@ import (
type verifyPeerCertificateFunc func([][]byte, [][]*x509.Certificate) error
-func certIsRevoked(cert *x509.Certificate, crl *pkix.CertificateList) bool {
- if crl.HasExpired(time.Now()) {
+func certIsRevoked(cert *x509.Certificate, crl *x509.RevocationList) bool {
+ if !time.Now().Before(crl.NextUpdate) {
log.Warningf("The current Certificate Revocation List (CRL) is past expiry date and must be updated. Revoked certificates will still be rejected in this state.")
}
- for _, revoked := range crl.TBSCertList.RevokedCertificates {
+ for _, revoked := range crl.RevokedCertificates {
if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
return true
}
@@ -54,7 +53,7 @@ func verifyPeerCertificateAgainstCRL(crl string) (verifyPeerCertificateFunc, err
cert := chain[i]
issuerCert := chain[i+1]
for _, crl := range crlSet {
- if issuerCert.CheckCRLSignature(crl) == nil {
+ if crl.CheckSignatureFrom(issuerCert) == nil {
if certIsRevoked(cert, crl) {
return fmt.Errorf("Certificate revoked: CommonName=%v", cert.Subject.CommonName)
}
@@ -66,13 +65,13 @@ func verifyPeerCertificateAgainstCRL(crl string) (verifyPeerCertificateFunc, err
}, nil
}
-func loadCRLSet(crl string) ([]*pkix.CertificateList, error) {
+func loadCRLSet(crl string) ([]*x509.RevocationList, error) {
body, err := os.ReadFile(crl)
if err != nil {
return nil, err
}
- crlSet := make([]*pkix.CertificateList, 0)
+ crlSet := make([]*x509.RevocationList, 0)
for len(body) > 0 {
var block *pem.Block
block, body = pem.Decode(body)
@@ -83,7 +82,7 @@ func loadCRLSet(crl string) ([]*pkix.CertificateList, error) {
continue
}
- parsedCRL, err := x509.ParseCRL(block.Bytes)
+ parsedCRL, err := x509.ParseRevocationList(block.Bytes)
if err != nil {
return nil, err
}
diff --git a/go/vt/workflow/long_polling_test.go b/go/vt/workflow/long_polling_test.go
index 85dbabd3f28..a1705dd2346 100644
--- a/go/vt/workflow/long_polling_test.go
+++ b/go/vt/workflow/long_polling_test.go
@@ -94,9 +94,11 @@ func TestLongPolling(t *testing.T) {
u.Path = "/workflow/action/1"
message := `{"path":"/uuid1","name":"button1"}`
buf := bytes.NewReader([]byte(message))
- if _, err := http.Post(u.String(), "application/json; charset=utf-8", buf); err != nil {
+ pResp, err := http.Post(u.String(), "application/json; charset=utf-8", buf)
+ if err != nil {
t.Fatalf("/action/1 post failed: %v", err)
}
+ pResp.Body.Close()
for timeout := 0; ; timeout++ {
// This is an asynchronous action, need to take the lock.
tw.mu.Lock()
diff --git a/go/vt/workflow/websocket_test.go b/go/vt/workflow/websocket_test.go
index e47b730e9ad..4a08422f532 100644
--- a/go/vt/workflow/websocket_test.go
+++ b/go/vt/workflow/websocket_test.go
@@ -46,10 +46,11 @@ func TestWebSocket(t *testing.T) {
// Start a client websocket.
u := url.URL{Scheme: "ws", Host: listener.Addr().String(), Path: "/workflow"}
- c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
+ c, resp, err := websocket.DefaultDialer.Dial(u.String(), nil)
if err != nil {
t.Fatalf("WebSocket dial failed: %v", err)
}
+ defer resp.Body.Close()
// Read the original full dump.
_, tree, err := c.ReadMessage()
diff --git a/go/vt/wrangler/doc_test.md b/go/vt/wrangler/doc_test.md
index 4fd445581da..c84a3720225 100644
--- a/go/vt/wrangler/doc_test.md
+++ b/go/vt/wrangler/doc_test.md
@@ -43,7 +43,7 @@ test the workflow state machine. There is no actual data being vreplicated.
#### The fake MySQLDaemon
-`go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go`
+`go/vt/mysqlctl/fakemysqldaemon.go`
Used to set primary positions to provide/validate gtids.
diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go
index f69d95f6c5a..5b7fc0c1873 100644
--- a/go/vt/wrangler/fake_dbclient_test.go
+++ b/go/vt/wrangler/fake_dbclient_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"regexp"
"strings"
+ "sync"
"testing"
"github.com/stretchr/testify/assert"
@@ -63,6 +64,7 @@ func (dbrs *dbResults) exhausted() bool {
// fakeDBClient fakes a binlog_player.DBClient.
type fakeDBClient struct {
+ mu sync.Mutex
name string
queries map[string]*dbResults
queriesRE map[string]*dbResults
@@ -86,6 +88,8 @@ func newFakeDBClient(name string) *fakeDBClient {
}
func (dc *fakeDBClient) addQuery(query string, result *sqltypes.Result, err error) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
if testMode == "debug" {
log.Infof("%s::addQuery %s\n\n", dc.id(), query)
}
@@ -98,6 +102,8 @@ func (dc *fakeDBClient) addQuery(query string, result *sqltypes.Result, err erro
}
func (dc *fakeDBClient) addQueryRE(query string, result *sqltypes.Result, err error) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
if testMode == "debug" {
log.Infof("%s::addQueryRE %s\n\n", dc.id(), query)
}
@@ -110,11 +116,15 @@ func (dc *fakeDBClient) addQueryRE(query string, result *sqltypes.Result, err er
}
func (dc *fakeDBClient) getInvariant(query string) *sqltypes.Result {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
return dc.invariants[query]
}
// note: addInvariant will replace a previous result for a query with the provided one: this is used in the tests
func (dc *fakeDBClient) addInvariant(query string, result *sqltypes.Result) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
if testMode == "debug" {
log.Infof("%s::addInvariant %s\n\n", dc.id(), query)
}
@@ -156,6 +166,8 @@ func (dc *fakeDBClient) id() string {
// ExecuteFetch is part of the DBClient interface
func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
qr, err := dc.executeFetch(query, maxrows)
if testMode == "debug" {
log.Infof("%s::ExecuteFetch for >>>%s<<< returns >>>%v<<< error >>>%+v<<< ", dc.id(), query, qr, err)
@@ -187,6 +199,8 @@ func (dc *fakeDBClient) executeFetch(query string, maxrows int) (*sqltypes.Resul
}
func (dc *fakeDBClient) verifyQueries(t *testing.T) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
t.Helper()
for query, dbrs := range dc.queries {
if !dbrs.exhausted() {
diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go
index 9fdb6e616a1..37aeeed6fa3 100644
--- a/go/vt/wrangler/fake_tablet_test.go
+++ b/go/vt/wrangler/fake_tablet_test.go
@@ -29,9 +29,11 @@ import (
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/netutil"
"vitess.io/vitess/go/vt/dbconfigs"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vttablet/grpctmserver"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "vitess.io/vitess/go/vt/vttablet/queryservice/fakes"
"vitess.io/vitess/go/vt/vttablet/tabletconntest"
"vitess.io/vitess/go/vt/vttablet/tabletmanager"
"vitess.io/vitess/go/vt/vttablet/tabletservermock"
@@ -48,6 +50,12 @@ import (
_ "vitess.io/vitess/go/vt/vttablet/grpctabletconn"
)
+func init() {
+ // Ensure we will use the right protocol (gRPC) in all unit tests.
+ tabletconntest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc")
+ tmclienttest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc")
+}
+
// This file was copied from testlib. All tests from testlib should be moved
// to the current directory. In order to move tests from there, we have to
// remove the circular dependency it causes (through vtctl dependence).
@@ -67,7 +75,7 @@ type fakeTablet struct {
// We also create the RPCServer, so users can register more services
// before calling StartActionLoop().
Tablet *topodatapb.Tablet
- FakeMysqlDaemon *fakemysqldaemon.FakeMysqlDaemon
+ FakeMysqlDaemon *mysqlctl.FakeMysqlDaemon
RPCServer *grpc.Server
// The following fields are created when we start the event loop for
@@ -81,6 +89,8 @@ type fakeTablet struct {
StartHTTPServer bool
HTTPListener net.Listener
HTTPServer *http.Server
+
+ queryservice.QueryService
}
// TabletOption is an interface for changing tablet parameters.
@@ -134,13 +144,14 @@ func newFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletTy
}
// create a FakeMysqlDaemon with the right information by default
- fakeMysqlDaemon := fakemysqldaemon.NewFakeMysqlDaemon(db)
+ fakeMysqlDaemon := mysqlctl.NewFakeMysqlDaemon(db)
fakeMysqlDaemon.MysqlPort.Set(mysqlPort)
return &fakeTablet{
Tablet: tablet,
FakeMysqlDaemon: fakeMysqlDaemon,
RPCServer: grpc.NewServer(),
+ QueryService: fakes.ErrorQueryService,
}
}
@@ -238,8 +249,14 @@ func (ft *fakeTablet) Target() querypb.Target {
}
}
-func init() {
- // enforce we will use the right protocol (gRPC) in all unit tests
- tabletconntest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc")
- tmclienttest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc")
+func (ft *fakeTablet) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error {
+ return callback(&querypb.StreamHealthResponse{
+ Serving: true,
+ Target: &querypb.Target{
+ Keyspace: ft.Tablet.Keyspace,
+ Shard: ft.Tablet.Shard,
+ TabletType: ft.Tablet.Type,
+ },
+ RealtimeStats: &querypb.RealtimeStats{},
+ })
}
diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go
index c5579fc7579..fdd4b2c3007 100644
--- a/go/vt/wrangler/materializer.go
+++ b/go/vt/wrangler/materializer.go
@@ -227,6 +227,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta
if err := topotools.SaveRoutingRules(ctx, wr.ts, rules); err != nil {
return err
}
+
if vschema != nil {
// We added to the vschema.
if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil {
@@ -891,6 +892,39 @@ func getMigrationID(targetKeyspace string, shardTablets []string) (int64, error)
return int64(hasher.Sum64() & math.MaxInt64), nil
}
+// createDefaultShardRoutingRules creates a reverse routing rule for
+// each shard in a new partial keyspace migration workflow that does
+// not already have an existing routing rule in place.
+func (wr *Wrangler) createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.MaterializeSettings) error {
+ srr, err := topotools.GetShardRoutingRules(ctx, wr.ts)
+ if err != nil {
+ return err
+ }
+ allShards, err := wr.sourceTs.GetServingShards(ctx, ms.SourceKeyspace)
+ if err != nil {
+ return err
+ }
+ changed := false
+ for _, si := range allShards {
+ fromSource := fmt.Sprintf("%s.%s", ms.SourceKeyspace, si.ShardName())
+ fromTarget := fmt.Sprintf("%s.%s", ms.TargetKeyspace, si.ShardName())
+ if srr[fromSource] == "" && srr[fromTarget] == "" {
+ srr[fromTarget] = ms.SourceKeyspace
+ changed = true
+ wr.Logger().Infof("Added default shard routing rule from %q to %q", fromTarget, fromSource)
+ }
+ }
+ if changed {
+ if err := topotools.SaveShardRoutingRules(ctx, wr.ts, srr); err != nil {
+ return err
+ }
+ if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func (wr *Wrangler) prepareMaterializerStreams(ctx context.Context, ms *vtctldatapb.MaterializeSettings) (*materializer, error) {
if err := wr.validateNewWorkflow(ctx, ms.TargetKeyspace, ms.Workflow); err != nil {
return nil, err
@@ -899,6 +933,11 @@ func (wr *Wrangler) prepareMaterializerStreams(ctx context.Context, ms *vtctldat
if err != nil {
return nil, err
}
+ if mz.isPartial {
+ if err := wr.createDefaultShardRoutingRules(ctx, ms); err != nil {
+ return nil, err
+ }
+ }
if err := mz.deploySchema(ctx); err != nil {
return nil, err
}
diff --git a/go/vt/wrangler/shard.go b/go/vt/wrangler/shard.go
index 8ea85290022..695f17b2f75 100644
--- a/go/vt/wrangler/shard.go
+++ b/go/vt/wrangler/shard.go
@@ -113,7 +113,7 @@ func (wr *Wrangler) DeleteShard(ctx context.Context, keyspace, shard string, rec
// GetTabletMap ignores ErrNoNode, and it's good for
// our purpose, it means a tablet was deleted but is
// still referenced.
- tabletMap, err := wr.ts.GetTabletMap(ctx, aliases)
+ tabletMap, err := wr.ts.GetTabletMap(ctx, aliases, nil)
if err != nil {
return fmt.Errorf("GetTabletMap() failed: %v", err)
}
diff --git a/go/vt/wrangler/split.go b/go/vt/wrangler/split.go
index d780fa10025..d77677bc48f 100644
--- a/go/vt/wrangler/split.go
+++ b/go/vt/wrangler/split.go
@@ -41,7 +41,7 @@ const (
// on a Shard.
func (wr *Wrangler) SetSourceShards(ctx context.Context, keyspace, shard string, sources []*topodatapb.TabletAlias, tables []string) error {
// Read the source tablets.
- sourceTablets, err := wr.ts.GetTabletMap(ctx, sources)
+ sourceTablets, err := wr.ts.GetTabletMap(ctx, sources, nil)
if err != nil {
return err
}
diff --git a/go/vt/wrangler/switcher.go b/go/vt/wrangler/switcher.go
index 27ff564dd17..e46b62c57e8 100644
--- a/go/vt/wrangler/switcher.go
+++ b/go/vt/wrangler/switcher.go
@@ -40,6 +40,10 @@ func (r *switcher) deleteRoutingRules(ctx context.Context) error {
return r.ts.deleteRoutingRules(ctx)
}
+func (r *switcher) deleteShardRoutingRules(ctx context.Context) error {
+ return r.ts.deleteShardRoutingRules(ctx)
+}
+
func (r *switcher) dropSourceDeniedTables(ctx context.Context) error {
return r.ts.dropSourceDeniedTables(ctx)
}
diff --git a/go/vt/wrangler/switcher_dry_run.go b/go/vt/wrangler/switcher_dry_run.go
index 832f5f1917f..4c1114d6c9d 100644
--- a/go/vt/wrangler/switcher_dry_run.go
+++ b/go/vt/wrangler/switcher_dry_run.go
@@ -47,6 +47,13 @@ func (dr *switcherDryRun) deleteRoutingRules(ctx context.Context) error {
return nil
}
+func (dr *switcherDryRun) deleteShardRoutingRules(ctx context.Context) error {
+ if dr.ts.isPartialMigration {
+ dr.drLog.Log("Shard routing rules for participating shards will be deleted")
+ }
+ return nil
+}
+
func (dr *switcherDryRun) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction workflow.TrafficSwitchDirection) error {
sourceShards := make([]string, 0)
targetShards := make([]string, 0)
diff --git a/go/vt/wrangler/switcher_interface.go b/go/vt/wrangler/switcher_interface.go
index 2e400b69ac7..26bd5f53a63 100644
--- a/go/vt/wrangler/switcher_interface.go
+++ b/go/vt/wrangler/switcher_interface.go
@@ -50,6 +50,7 @@ type iswitcher interface {
removeTargetTables(ctx context.Context) error
dropTargetShards(ctx context.Context) error
deleteRoutingRules(ctx context.Context) error
+ deleteShardRoutingRules(ctx context.Context) error
addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error
logs() *[]string
}
diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go
index 7cdb7172e19..0dc1beb6193 100644
--- a/go/vt/wrangler/testlib/backup_test.go
+++ b/go/vt/wrangler/testlib/backup_test.go
@@ -183,7 +183,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
}
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// This first set of STOP and START commands come from
@@ -194,7 +194,6 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
// These commands come from SetReplicationSource RPC called
// to set the correct primary and semi-sync after Backup has concluded
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -232,13 +231,13 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
}
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
"RESET SLAVE ALL",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -261,7 +260,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
RelayLogInfoPath: path.Join(root, "relay-log.info"),
}
- err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */)
+ err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */, []string{} /* ignoreBackupEngines */)
if err != nil {
return err
}
@@ -291,7 +290,6 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
"STOP SLAVE",
"RESET SLAVE ALL",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -299,7 +297,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
primary.FakeMysqlDaemon.SetReplicationPositionPos = primary.FakeMysqlDaemon.CurrentPrimaryPosition
// restore primary from latest backup
- require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */),
+ require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, []string{} /* ignoreBackupEngines */),
"RestoreData failed")
// tablet was created as PRIMARY, so it's baseTabletType is PRIMARY
assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type)
@@ -315,7 +313,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error {
}
// Test restore with the backup timestamp
- require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime),
+ require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime, []string{} /* ignoreBackupEngines */),
"RestoreData with backup timestamp failed")
assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type)
assert.False(t, primary.FakeMysqlDaemon.Replicating)
@@ -420,7 +418,7 @@ func TestBackupRestoreLagged(t *testing.T) {
sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)}
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// This first set of STOP and START commands come from
@@ -431,7 +429,6 @@ func TestBackupRestoreLagged(t *testing.T) {
// These commands come from SetReplicationSource RPC called
// to set the correct primary and semi-sync after Backup has concluded
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -490,13 +487,13 @@ func TestBackupRestoreLagged(t *testing.T) {
}
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
"RESET SLAVE ALL",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -521,7 +518,7 @@ func TestBackupRestoreLagged(t *testing.T) {
errCh = make(chan error, 1)
go func(ctx context.Context, tablet *FakeTablet) {
- errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */)
+ errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, []string{} /* ignoreBackupEngines */)
}(ctx, destTablet)
timer = time.NewTicker(1 * time.Second)
@@ -643,7 +640,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)}
sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// This first set of STOP and START commands come from
@@ -654,7 +651,6 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
// These commands come from SetReplicationSource RPC called
// to set the correct primary and semi-sync after Backup has concluded
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -685,13 +681,13 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
}
destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
"RESET SLAVE ALL",
"FAKE SET SLAVE POSITION",
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -720,7 +716,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) {
// set a short timeout so that we don't have to wait 30 seconds
topo.RemoteOperationTimeout = 2 * time.Second
// Restore should still succeed
- require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */))
+ require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, []string{} /* ignoreBackupEngines */))
// verify the full status
require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed")
assert.True(t, destTablet.FakeMysqlDaemon.Replicating)
@@ -878,7 +874,7 @@ func TestDisableActiveReparents(t *testing.T) {
RelayLogInfoPath: path.Join(root, "relay-log.info"),
}
- require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */))
+ require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, []string{} /* ignoreBackupEngines */))
// verify the full status
require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed")
assert.False(t, destTablet.FakeMysqlDaemon.Replicating)
diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go
index 8620d7e7565..d192aa3d42e 100644
--- a/go/vt/wrangler/testlib/copy_schema_shard_test.go
+++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go
@@ -17,14 +17,13 @@ limitations under the License.
package testlib
import (
+ "context"
"fmt"
"testing"
"time"
"vitess.io/vitess/go/vt/discovery"
- "context"
-
"vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/logutil"
@@ -75,7 +74,7 @@ func copySchema(t *testing.T, useShardAsSource bool) {
topodatapb.TabletType_RDONLY, sourceRdonlyDb, TabletKeyspaceShard(t, "ks", "-80"))
sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -172,6 +171,10 @@ func copySchema(t *testing.T, useShardAsSource bool) {
if useShardAsSource {
source = "ks/-80"
}
+
+ // PrimaryAlias in the shard record is updated asynchronously, so we should wait for it to succeed.
+ waitForShardPrimary(t, wr, destinationPrimary.Tablet)
+
if err := vp.Run([]string{"CopySchemaShard", "--include-views", source, "ks/-40"}); err != nil {
t.Fatalf("CopySchemaShard failed: %v", err)
}
diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go
index 014029504a9..8d80fa7d68f 100644
--- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go
+++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go
@@ -138,12 +138,11 @@ func TestEmergencyReparentShard(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE IO_THREAD",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -170,10 +169,9 @@ func TestEmergencyReparentShard(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
goodReplica2.StartActionLoop(t, wr)
@@ -235,7 +233,6 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) {
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE IO_THREAD",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -272,12 +269,11 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) {
newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition)
moreAdvancedReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE IO_THREAD",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go
index a0c065261a0..8d928926550 100644
--- a/go/vt/wrangler/testlib/external_reparent_test.go
+++ b/go/vt/wrangler/testlib/external_reparent_test.go
@@ -17,14 +17,13 @@ limitations under the License.
package testlib
import (
+ "context"
"flag"
"testing"
"time"
"vitess.io/vitess/go/vt/discovery"
- "context"
-
"github.com/stretchr/testify/assert"
"vitess.io/vitess/go/vt/logutil"
@@ -91,7 +90,6 @@ func TestTabletExternallyReparentedBasic(t *testing.T) {
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START Replica",
}
@@ -171,7 +169,6 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) {
// primary is still good to go.
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START Replica",
}
@@ -250,7 +247,6 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) {
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START Replica",
}
@@ -264,7 +260,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) {
goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -339,7 +335,6 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) {
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START Replica",
}
@@ -353,7 +348,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) {
goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -424,7 +419,6 @@ func TestTabletExternallyReparentedRerun(t *testing.T) {
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START Replica",
}
@@ -438,7 +432,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) {
// TabletActionReplicaWasRestarted.
goodReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -556,6 +550,9 @@ func TestRPCTabletExternallyReparentedDemotesPrimaryToConfiguredTabletType(t *te
}
}
+ // PrimaryAlias in the shard record is updated asynchronously, so we should wait for it to succeed.
+ waitForShardPrimary(t, wr, newPrimary.Tablet)
+
shardInfo, err := ts.GetShard(context.Background(), newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard)
assert.NoError(t, err)
diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go
index ff8d0457517..5c85e37d43a 100644
--- a/go/vt/wrangler/testlib/fake_tablet.go
+++ b/go/vt/wrangler/testlib/fake_tablet.go
@@ -34,7 +34,7 @@ import (
"vitess.io/vitess/go/netutil"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/dbconfigs"
- "vitess.io/vitess/go/vt/mysqlctl/fakemysqldaemon"
+ "vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vttablet/grpctmserver"
@@ -69,7 +69,7 @@ type FakeTablet struct {
// We also create the RPCServer, so users can register more services
// before calling StartActionLoop().
Tablet *topodatapb.Tablet
- FakeMysqlDaemon *fakemysqldaemon.FakeMysqlDaemon
+ FakeMysqlDaemon *mysqlctl.FakeMysqlDaemon
RPCServer *grpc.Server
// The following fields are created when we start the event loop for
@@ -159,7 +159,7 @@ func NewFakeTablet(t *testing.T, wr *wrangler.Wrangler, cell string, uid uint32,
}
// create a FakeMysqlDaemon with the right information by default
- fakeMysqlDaemon := fakemysqldaemon.NewFakeMysqlDaemon(db)
+ fakeMysqlDaemon := mysqlctl.NewFakeMysqlDaemon(db)
fakeMysqlDaemon.MysqlPort.Set(mysqlPort)
return &FakeTablet{
@@ -245,7 +245,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) {
// StopActionLoop will stop the Action Loop for the given FakeTablet
func (ft *FakeTablet) StopActionLoop(t *testing.T) {
if ft.TM == nil {
- t.Fatalf("TM for %v is not running", ft.Tablet.Alias)
+ return
}
if ft.StartHTTPServer {
ft.HTTPListener.Close()
diff --git a/go/vt/wrangler/testlib/permissions_test.go b/go/vt/wrangler/testlib/permissions_test.go
index 1a023355d99..9d2a950c8e3 100644
--- a/go/vt/wrangler/testlib/permissions_test.go
+++ b/go/vt/wrangler/testlib/permissions_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package testlib
import (
+ "context"
"strings"
"testing"
"time"
@@ -24,8 +25,6 @@ import (
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/topo/topoproto"
- "context"
-
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo"
@@ -565,7 +564,7 @@ func TestPermissions(t *testing.T) {
replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go
index 50e27926cdd..cc761b9f130 100644
--- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go
+++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go
@@ -79,7 +79,6 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) {
}
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -97,11 +96,9 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) {
oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
// we end up calling SetReplicationSource twice on the old primary
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -118,11 +115,10 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -198,7 +194,6 @@ func TestPlannedReparentShardNoError(t *testing.T) {
}
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -216,11 +211,9 @@ func TestPlannedReparentShardNoError(t *testing.T) {
oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
// we end up calling SetReplicationSource twice on the old primary
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -237,11 +230,10 @@ func TestPlannedReparentShardNoError(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -253,10 +245,9 @@ func TestPlannedReparentShardNoError(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
goodReplica2.StartActionLoop(t, wr)
@@ -343,7 +334,6 @@ func TestPlannedReparentInitialization(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -356,7 +346,6 @@ func TestPlannedReparentInitialization(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
goodReplica2.StartActionLoop(t, wr)
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
defer goodReplica2.StopActionLoop(t)
@@ -427,7 +416,6 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) {
}
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -445,7 +433,6 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) {
oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.PromoteResult
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -461,11 +448,10 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -477,10 +463,9 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
goodReplica2.StartActionLoop(t, wr)
@@ -541,7 +526,6 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) {
}
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -558,7 +542,6 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) {
oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -574,11 +557,10 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -590,10 +572,9 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
goodReplica2.StartActionLoop(t, wr)
@@ -655,7 +636,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// simulate error that will trigger a call to RestartReplication
@@ -740,7 +721,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) {
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// simulate error that will trigger a call to RestartReplication
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
// In SetReplicationSource, we find that the source host and port was already set correctly,
@@ -819,7 +800,6 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
}
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -837,7 +817,6 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0]
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet))
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -853,11 +832,10 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -869,10 +847,9 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
goodReplica2.StartActionLoop(t, wr)
@@ -893,12 +870,10 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
newPrimary.FakeMysqlDaemon.PromoteError = nil
newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
// extra commands because of retry
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
"CREATE DATABASE IF NOT EXISTS _vt",
@@ -907,11 +882,9 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) {
"SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES",
}
oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
// extra commands because of retry
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -974,11 +947,10 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) {
goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -990,10 +962,9 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) {
goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet))
goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
}
goodReplica2.StartActionLoop(t, wr)
diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go
index 315ebce9638..5e97e1e9be8 100644
--- a/go/vt/wrangler/testlib/reparent_utils_test.go
+++ b/go/vt/wrangler/testlib/reparent_utils_test.go
@@ -17,16 +17,18 @@ limitations under the License.
package testlib
import (
+ "context"
+ "errors"
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil"
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/vtctl/reparentutil"
- "context"
-
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo"
@@ -92,7 +94,7 @@ func TestShardReplicationStatuses(t *testing.T) {
replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -161,11 +163,10 @@ func TestReparentTablet(t *testing.T) {
replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
"STOP SLAVE",
- "RESET SLAVE ALL",
"FAKE SET MASTER",
"START SLAVE",
}
@@ -183,3 +184,98 @@ func TestReparentTablet(t *testing.T) {
}
checkSemiSyncEnabled(t, false, true, replica)
}
+
+// TestSetReplicationSource tests that SetReplicationSource works as intended under various circumstances.
+func TestSetReplicationSource(t *testing.T) {
+ ctx := context.Background()
+ ts := memorytopo.NewServer("cell1", "cell2")
+ wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
+
+ // create shard and tablets
+ _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0")
+ require.NoError(t, err, "CreateShard failed")
+
+ primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil)
+ reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync")
+
+ // mark the primary inside the shard
+ _, err = ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = primary.Tablet.Alias
+ return nil
+ })
+ require.NoError(t, err, "UpdateShardFields failed")
+
+ // primary action loop (to initialize host and port)
+ primary.StartActionLoop(t, wr)
+ defer primary.StopActionLoop(t)
+
+ // test when we receive a relay log error while starting replication
+ t.Run("Relay log error", func(t *testing.T) {
+ replica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil)
+ // replica loop
+ // We have to set the settings as replicating. Otherwise,
+ // the replication manager intervenes and tries to fix replication,
+ // which ends up making this test unpredictable.
+ replica.FakeMysqlDaemon.Replicating = true
+ replica.FakeMysqlDaemon.IOThreadRunning = true
+ replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
+ replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
+ // These 3 statements come from tablet startup
+ "STOP SLAVE",
+ "FAKE SET MASTER",
+ "START SLAVE",
+ // We stop and reset the replication parameters because of relay log issues.
+ "STOP SLAVE",
+ "RESET SLAVE",
+ "START SLAVE",
+ }
+ replica.StartActionLoop(t, wr)
+ defer replica.StopActionLoop(t)
+
+ // Set the correct error message that indicates we have received a relay log error.
+ replica.FakeMysqlDaemon.SetReplicationSourceError = errors.New("ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log")
+ // run ReparentTablet
+ err = wr.SetReplicationSource(ctx, replica.Tablet)
+ require.NoError(t, err, "SetReplicationSource failed")
+
+ // check what was run
+ err = replica.FakeMysqlDaemon.CheckSuperQueryList()
+ require.NoError(t, err, "CheckSuperQueryList failed")
+ checkSemiSyncEnabled(t, false, true, replica)
+ })
+
+ // test setting an empty hostname because of primary shutdown
+ t.Run("Primary tablet already shutdown", func(t *testing.T) {
+ replica := NewFakeTablet(t, wr, "cell1", 3, topodatapb.TabletType_REPLICA, nil)
+ // replica loop
+ replica.FakeMysqlDaemon.Replicating = true
+ replica.FakeMysqlDaemon.IOThreadRunning = true
+ replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet))
+ replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
+ // These 3 statements come from tablet startup
+ "STOP SLAVE",
+ "FAKE SET MASTER",
+ "START SLAVE",
+ // For the SetReplicationSource call, we shouldn't get any queries at all!
+ }
+ replica.StartActionLoop(t, wr)
+ defer replica.StopActionLoop(t)
+
+ // stop the primary
+ primary.StopActionLoop(t)
+ // update the primary topo record
+ wr.TopoServer().UpdateTabletFields(ctx, primary.Tablet.Alias, func(tablet *topodatapb.Tablet) error {
+ tablet.MysqlHostname = ""
+ return nil
+ })
+
+ // run SetReplicationSource
+ err = wr.SetReplicationSource(ctx, replica.Tablet)
+ require.ErrorContains(t, err, "Shard primary has empty mysql hostname")
+
+ // check what was run
+ err = replica.FakeMysqlDaemon.CheckSuperQueryList()
+ require.NoError(t, err, "CheckSuperQueryList failed")
+ checkSemiSyncEnabled(t, false, true, replica)
+ })
+}
diff --git a/go/vt/wrangler/testlib/utils.go b/go/vt/wrangler/testlib/utils.go
new file mode 100644
index 00000000000..ae32e5036bc
--- /dev/null
+++ b/go/vt/wrangler/testlib/utils.go
@@ -0,0 +1,57 @@
+package testlib
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/wrangler"
+)
+
+// waitForTabletType waits for the given tablet type to be reached.
+func waitForTabletType(t *testing.T, wr *wrangler.Wrangler, tabletAlias *topodatapb.TabletAlias, tabletType topodatapb.TabletType) {
+ timeout := time.After(15 * time.Second)
+ for {
+ tablet, err := wr.TopoServer().GetTablet(context.Background(), tabletAlias)
+ require.NoError(t, err)
+ if tablet.Type == tabletType {
+ return
+ }
+
+ select {
+ case <-timeout:
+ t.Fatalf("%s didn't reach the tablet type %v", topoproto.TabletAliasString(tabletAlias), tabletType.String())
+ return
+ default:
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+}
+
+// waitForShardPrimary waits for the shard record to be upto date such that it has the given primary.
+func waitForShardPrimary(t *testing.T, wr *wrangler.Wrangler, primaryTablet *topodatapb.Tablet) {
+ timeout := time.After(15 * time.Second)
+ for {
+ si, err := wr.TopoServer().GetShard(context.Background(), primaryTablet.Keyspace, primaryTablet.Shard)
+ require.NoError(t, err)
+ if topoproto.TabletAliasEqual(si.PrimaryAlias, primaryTablet.Alias) {
+ return
+ }
+
+ select {
+ case <-timeout:
+ t.Fatalf("%s/%s didn't see the tablet %v become the primary, instead it is %v",
+ primaryTablet.Keyspace, primaryTablet.Shard,
+ topoproto.TabletAliasString(primaryTablet.Alias),
+ topoproto.TabletAliasString(si.PrimaryAlias),
+ )
+ return
+ default:
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+}
diff --git a/go/vt/wrangler/testlib/version_test.go b/go/vt/wrangler/testlib/version_test.go
index fbd8610e437..c54a0811948 100644
--- a/go/vt/wrangler/testlib/version_test.go
+++ b/go/vt/wrangler/testlib/version_test.go
@@ -94,7 +94,7 @@ func TestVersion(t *testing.T) {
sourceReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(sourceReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(sourcePrimary.Tablet))
sourceReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{
// These 3 statements come from tablet startup
- "RESET SLAVE ALL",
+ "STOP SLAVE",
"FAKE SET MASTER",
"START SLAVE",
}
diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go
index 9a07150c117..6dd04501d86 100644
--- a/go/vt/wrangler/traffic_switcher.go
+++ b/go/vt/wrangler/traffic_switcher.go
@@ -211,9 +211,10 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl
ws := workflow.NewServer(wr.ts, wr.tmc)
state := &workflow.State{
- Workflow: workflowName,
- SourceKeyspace: ts.SourceKeyspaceName(),
- TargetKeyspace: targetKeyspace,
+ Workflow: workflowName,
+ SourceKeyspace: ts.SourceKeyspaceName(),
+ TargetKeyspace: targetKeyspace,
+ IsPartialMigration: ts.isPartialMigration,
}
var (
@@ -221,9 +222,11 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl
keyspace string
)
- // we reverse writes by using the source_keyspace.workflowname_reverse workflow spec, so we need to use the
- // source of the reverse workflow, which is the target of the workflow initiated by the user for checking routing rules
- // Similarly we use a target shard of the reverse workflow as the original source to check if writes have been switched
+ // We reverse writes by using the source_keyspace.workflowname_reverse workflow
+ // spec, so we need to use the source of the reverse workflow, which is the
+ // target of the workflow initiated by the user for checking routing rules.
+ // Similarly we use a target shard of the reverse workflow as the original
+ // source to check if writes have been switched.
if strings.HasSuffix(workflowName, "_reverse") {
reverse = true
keyspace = state.SourceKeyspace
@@ -234,7 +237,7 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl
if ts.MigrationType() == binlogdatapb.MigrationType_TABLES {
state.WorkflowType = workflow.TypeMoveTables
- // we assume a consistent state, so only choose routing rule for one table for replica/rdonly
+ // We assume a consistent state, so only choose routing rule for one table.
if len(ts.Tables()) == 0 {
return nil, nil, fmt.Errorf("no tables in workflow %s.%s", keyspace, workflowName)
@@ -242,14 +245,17 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl
table := ts.Tables()[0]
if ts.isPartialMigration { // shard level traffic switching is all or nothing
- shardRules, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer())
+ shardRoutingRules, err := wr.ts.GetShardRoutingRules(ctx)
if err != nil {
return nil, nil, err
}
- for _, sourceShard := range ts.SourceShards() {
- if _, ok := shardRules[fmt.Sprintf("%s.%s", ts.sourceKeyspace, sourceShard.ShardName())]; ok {
- state.WritesPartiallySwitched = true // and in effect reads are too
- break
+
+ rules := shardRoutingRules.Rules
+ for _, rule := range rules {
+ if rule.ToKeyspace == ts.SourceKeyspaceName() {
+ state.ShardsNotYetSwitched = append(state.ShardsNotYetSwitched, rule.Shard)
+ } else {
+ state.ShardsAlreadySwitched = append(state.ShardsAlreadySwitched, rule.Shard)
}
}
} else {
@@ -391,7 +397,7 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam
return sw.logs(), nil
}
wr.Logger().Infof("About to switchShardReads: %+v, %+v, %+v", cells, servedTypes, direction)
- if err := ts.switchShardReads(ctx, cells, servedTypes, direction); err != nil {
+ if err := sw.switchShardReads(ctx, cells, servedTypes, direction); err != nil {
ts.Logger().Errorf("switchShardReads failed: %v", err)
return nil, err
}
@@ -428,7 +434,7 @@ func (wr *Wrangler) areTabletsAvailableToStreamFrom(ctx context.Context, ts *tra
if cells == nil {
cells = append(cells, shard.PrimaryAlias.Cell)
}
- tp, err := discovery.NewTabletPicker(wr.ts, cells, keyspace, shard.ShardName(), tabletTypes)
+ tp, err := discovery.NewTabletPicker(ctx, wr.ts, cells, shard.PrimaryAlias.Cell, keyspace, shard.ShardName(), tabletTypes, discovery.TabletPickerOptions{})
if err != nil {
allErrors.RecordError(err)
return
@@ -696,6 +702,9 @@ func (wr *Wrangler) dropArtifacts(ctx context.Context, keepRoutingRules bool, sw
if err := sw.deleteRoutingRules(ctx); err != nil {
return err
}
+ if err := sw.deleteShardRoutingRules(ctx); err != nil {
+ return err
+ }
}
return nil
@@ -1517,6 +1526,23 @@ func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error {
return nil
}
+func (ts *trafficSwitcher) deleteShardRoutingRules(ctx context.Context) error {
+ if !ts.isPartialMigration {
+ return nil
+ }
+ srr, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer())
+ if err != nil {
+ return err
+ }
+ for _, si := range ts.TargetShards() {
+ delete(srr, fmt.Sprintf("%s.%s", ts.targetKeyspace, si.ShardName()))
+ }
+ if err := topotools.SaveShardRoutingRules(ctx, ts.TopoServer(), srr); err != nil {
+ return err
+ }
+ return nil
+}
+
func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error {
return ts.ForAllSources(func(source *workflow.MigrationSource) error {
query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName()))
diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go
index af0a6bea2f3..963be5572ab 100644
--- a/go/vt/wrangler/traffic_switcher_env_test.go
+++ b/go/vt/wrangler/traffic_switcher_env_test.go
@@ -18,9 +18,14 @@ package wrangler
import (
"fmt"
+ "math/rand"
+ "sync"
"testing"
"time"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/semaphore"
+
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/mysql/fakesqldb"
@@ -30,6 +35,7 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
+ "vitess.io/vitess/go/vt/grpcclient"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/logutil"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
@@ -39,6 +45,9 @@ import (
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/memorytopo"
"vitess.io/vitess/go/vt/topotools"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+ "vitess.io/vitess/go/vt/vttablet/tabletconntest"
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
"vitess.io/vitess/go/vt/vttablet/tmclient"
)
@@ -74,6 +83,7 @@ type testMigraterEnv struct {
sourceKeyRanges []*topodatapb.KeyRange
targetKeyRanges []*topodatapb.KeyRange
tmeDB *fakesqldb.DB
+ mu sync.Mutex
}
// testShardMigraterEnv has some convenience functions for adding expected queries.
@@ -135,6 +145,19 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards,
tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange)
}
+ dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000))
+ tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ tme.mu.Lock()
+ defer tme.mu.Unlock()
+ for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) {
+ if ft.Tablet.Alias.Uid == tablet.Alias.Uid {
+ return ft, nil
+ }
+ }
+ return nil, nil
+ })
+ tabletconntest.SetProtocol("go.vt.wrangler.traffic_switcher_env_test", dialerName)
+
vs := &vschemapb.Keyspace{
Sharded: true,
Vindexes: map[string]*vschemapb.Vindex{
@@ -260,6 +283,169 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards,
return tme
}
+// newTestTablePartialMigrater creates a test tablet migrater
+// specifially for partial or shard by shard migrations.
+// The shards must be the same on the source and target, and we
+// must be moving a subset of them.
+// fmtQuery should be of the form: 'select a, b %s group by a'.
+// The test will Sprintf a from clause and where clause as needed.
+func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shardsToMove []string, fmtQuery string) *testMigraterEnv {
+ require.Greater(t, len(shards), 1, "shard by shard migrations can only be done on sharded keyspaces")
+ tme := &testMigraterEnv{}
+ tme.ts = memorytopo.NewServer("cell1", "cell2")
+ tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient())
+ tme.wr.sem = semaphore.NewWeighted(1)
+ tme.sourceShards = shards
+ tme.targetShards = shards
+ tme.tmeDB = fakesqldb.New(t)
+ expectVDiffQueries(tme.tmeDB)
+ tabletID := 10
+ for _, shard := range tme.sourceShards {
+ tme.sourcePrimaries = append(tme.sourcePrimaries, newFakeTablet(t, tme.wr, "cell1", uint32(tabletID), topodatapb.TabletType_PRIMARY, tme.tmeDB, TabletKeyspaceShard(t, "ks1", shard)))
+ tabletID += 10
+
+ _, sourceKeyRange, err := topo.ValidateShardName(shard)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tme.sourceKeyRanges = append(tme.sourceKeyRanges, sourceKeyRange)
+ }
+ tpChoiceTablet := tme.sourcePrimaries[0].Tablet
+ tpChoice = &testTabletPickerChoice{
+ keyspace: tpChoiceTablet.Keyspace,
+ shard: tpChoiceTablet.Shard,
+ }
+ for _, shard := range tme.targetShards {
+ tme.targetPrimaries = append(tme.targetPrimaries, newFakeTablet(t, tme.wr, "cell1", uint32(tabletID), topodatapb.TabletType_PRIMARY, tme.tmeDB, TabletKeyspaceShard(t, "ks2", shard)))
+ tabletID += 10
+
+ _, targetKeyRange, err := topo.ValidateShardName(shard)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange)
+ }
+
+ dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000))
+ tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ tme.mu.Lock()
+ defer tme.mu.Unlock()
+ for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) {
+ if ft.Tablet.Alias.Uid == tablet.Alias.Uid {
+ return ft, nil
+ }
+ }
+ return nil, nil
+ })
+ tabletconntest.SetProtocol("go.vt.wrangler.traffic_switcher_env_test", dialerName)
+
+ vs := &vschemapb.Keyspace{
+ Sharded: true,
+ Vindexes: map[string]*vschemapb.Vindex{
+ "hash": {
+ Type: "hash",
+ },
+ },
+ Tables: map[string]*vschemapb.Table{
+ "t1": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "c1",
+ Name: "hash",
+ }},
+ },
+ "t2": {
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: "c1",
+ Name: "hash",
+ }},
+ },
+ },
+ }
+ err := tme.ts.SaveVSchema(ctx, "ks1", vs)
+ require.NoError(t, err)
+ err = tme.ts.SaveVSchema(ctx, "ks2", vs)
+ require.NoError(t, err)
+ err = tme.ts.RebuildSrvVSchema(ctx, nil)
+ require.NoError(t, err)
+ err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks1", []string{"cell1"}, false)
+ require.NoError(t, err)
+ err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks2", []string{"cell1"}, false)
+ require.NoError(t, err)
+
+ tme.startTablets(t)
+ tme.createDBClients(ctx, t)
+ tme.setPrimaryPositions()
+ now := time.Now().Unix()
+
+ for i, shard := range shards {
+ for _, shardToMove := range shardsToMove {
+ var streamInfoRows []string
+ var streamExtInfoRows []string
+ if shardToMove == shard {
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: "ks1",
+ Shard: shard,
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t1 where in_keyrange('%s')", shard)),
+ }, {
+ Match: "t2",
+ Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t2 where in_keyrange('%s')", shard)),
+ }},
+ },
+ }
+ streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v|||", i+1, bls))
+ streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0||||0", i+1, now, now))
+ }
+ tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1), noResult)
+ tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|message|cell|tablet_types",
+ "int64|varchar|varchar|varchar|varchar"),
+ streamInfoRows...))
+ tme.dbTargetClients[i].addInvariant(streamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"),
+ streamExtInfoRows...))
+ tme.dbTargetClients[i].addInvariant(reverseStreamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"),
+ streamExtInfoRows...))
+ }
+ }
+
+ for i, shard := range shards {
+ for _, shardToMove := range shardsToMove {
+ var streamInfoRows []string
+ if shardToMove == shard {
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: "ks2",
+ Shard: shard,
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t1 where in_keyrange('%s')", shard)),
+ }, {
+ Match: "t2",
+ Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t2 where in_keyrange('%s')", shard)),
+ }},
+ },
+ }
+ streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v|||", i+1, bls))
+ tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1), noResult)
+ }
+ tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|message|cell|tablet_types",
+ "int64|varchar|varchar|varchar|varchar"),
+ streamInfoRows...),
+ )
+ }
+ }
+
+ tme.targetKeyspace = "ks2"
+ return tme
+}
+
func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targetShards []string) *testShardMigraterEnv {
tme := &testShardMigraterEnv{}
tme.ts = memorytopo.NewServer("cell1", "cell2")
@@ -296,6 +482,19 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe
tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange)
}
+ dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000))
+ tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ tme.mu.Lock()
+ defer tme.mu.Unlock()
+ for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) {
+ if ft.Tablet.Alias.Uid == tablet.Alias.Uid {
+ return ft, nil
+ }
+ }
+ return nil, nil
+ })
+ tabletconntest.SetProtocol("go.vt.wrangler.traffic_switcher_env_test", dialerName)
+
vs := &vschemapb.Keyspace{
Sharded: true,
Vindexes: map[string]*vschema.Vindex{
@@ -433,7 +632,7 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) {
tme.dbSourceClients = append(tme.dbSourceClients, dbclient)
dbClientFactory := func() binlogplayer.DBClient { return dbclient }
// Replace existing engine with a new one
- primary.TM.VREngine = vreplication.NewTestEngine(tme.ts, "", primary.FakeMysqlDaemon, dbClientFactory, dbClientFactory, dbclient.DBName(), nil)
+ primary.TM.VREngine = vreplication.NewTestEngine(tme.ts, primary.Tablet.GetAlias().GetCell(), primary.FakeMysqlDaemon, dbClientFactory, dbClientFactory, dbclient.DBName(), nil)
primary.TM.VREngine.Open(ctx)
}
for _, primary := range tme.targetPrimaries {
@@ -442,7 +641,7 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) {
tme.dbTargetClients = append(tme.dbTargetClients, dbclient)
dbClientFactory := func() binlogplayer.DBClient { return dbclient }
// Replace existing engine with a new one
- primary.TM.VREngine = vreplication.NewTestEngine(tme.ts, "", primary.FakeMysqlDaemon, dbClientFactory, dbClientFactory, dbclient.DBName(), nil)
+ primary.TM.VREngine = vreplication.NewTestEngine(tme.ts, primary.Tablet.GetAlias().GetCell(), primary.FakeMysqlDaemon, dbClientFactory, dbClientFactory, dbclient.DBName(), nil)
primary.TM.VREngine.Open(ctx)
}
tme.allDBClients = append(tme.dbSourceClients, tme.dbTargetClients...)
diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go
index cfecefaeabd..e31f4c4b8c2 100644
--- a/go/vt/wrangler/vdiff.go
+++ b/go/vt/wrangler/vdiff.go
@@ -692,7 +692,7 @@ func (df *vdiff) selectTablets(ctx context.Context, ts *trafficSwitcher) error {
if ts.ExternalTopo() != nil {
sourceTopo = ts.ExternalTopo()
}
- tp, err := discovery.NewTabletPicker(sourceTopo, []string{df.sourceCell}, df.ts.SourceKeyspaceName(), shard, df.tabletTypesStr)
+ tp, err := discovery.NewTabletPicker(ctx, sourceTopo, []string{df.sourceCell}, df.sourceCell, df.ts.SourceKeyspaceName(), shard, df.tabletTypesStr, discovery.TabletPickerOptions{})
if err != nil {
return err
}
@@ -710,7 +710,7 @@ func (df *vdiff) selectTablets(ctx context.Context, ts *trafficSwitcher) error {
go func() {
defer wg.Done()
err2 = df.forAll(df.targets, func(shard string, target *shardStreamer) error {
- tp, err := discovery.NewTabletPicker(df.ts.TopoServer(), []string{df.targetCell}, df.ts.TargetKeyspaceName(), shard, df.tabletTypesStr)
+ tp, err := discovery.NewTabletPicker(ctx, df.ts.TopoServer(), []string{df.targetCell}, df.targetCell, df.ts.TargetKeyspaceName(), shard, df.tabletTypesStr, discovery.TabletPickerOptions{})
if err != nil {
return err
}
diff --git a/go/vt/wrangler/vdiff_env_test.go b/go/vt/wrangler/vdiff_env_test.go
index 1d50f4dc28a..ca456867340 100644
--- a/go/vt/wrangler/vdiff_env_test.go
+++ b/go/vt/wrangler/vdiff_env_test.go
@@ -21,6 +21,7 @@ import (
"fmt"
"sync"
+ "vitess.io/vitess/go/mysql/fakesqldb"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/grpcclient"
"vitess.io/vitess/go/vt/logutil"
@@ -342,3 +343,18 @@ func (tmc *testVDiffTMClient) PrimaryPosition(ctx context.Context, tablet *topod
}
return pos, nil
}
+
+func expectVDiffQueries(db *fakesqldb.DB) {
+ res := &sqltypes.Result{}
+ queries := []string{
+ "USE `vt_ks`",
+ "USE `vt_ks1`",
+ "USE `vt_ks2`",
+ "optimize table _vt.copy_state",
+ "alter table _vt.copy_state auto_increment = 1",
+ }
+ for _, query := range queries {
+ db.AddQuery(query, res)
+ }
+ db.AddQueryPattern("delete from vd, vdt, vdl.*", res)
+}
diff --git a/go/vt/wrangler/version.go b/go/vt/wrangler/version.go
index 7b7626b25f4..be0bd019331 100644
--- a/go/vt/wrangler/version.go
+++ b/go/vt/wrangler/version.go
@@ -21,11 +21,9 @@ import (
"fmt"
"io"
"net/http"
- "sync"
"context"
- "vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -78,62 +76,18 @@ func (wr *Wrangler) GetVersion(ctx context.Context, tabletAlias *topodatapb.Tabl
return resp.Version, err
}
-// helper method to asynchronously get and diff a version
-func (wr *Wrangler) diffVersion(ctx context.Context, primaryVersion string, primaryAlias *topodatapb.TabletAlias, alias *topodatapb.TabletAlias, wg *sync.WaitGroup, er concurrency.ErrorRecorder) {
- defer wg.Done()
- log.Infof("Gathering version for %v", topoproto.TabletAliasString(alias))
- replicaVersion, err := wr.GetVersion(ctx, alias)
- if err != nil {
- er.RecordError(err)
- return
- }
-
- if primaryVersion != replicaVersion {
- er.RecordError(fmt.Errorf("primary %v version %v is different than replica %v version %v", topoproto.TabletAliasString(primaryAlias), primaryVersion, topoproto.TabletAliasString(alias), replicaVersion))
- }
-}
-
// ValidateVersionShard validates all versions are the same in all
// tablets in a shard
func (wr *Wrangler) ValidateVersionShard(ctx context.Context, keyspace, shard string) error {
- si, err := wr.ts.GetShard(ctx, keyspace, shard)
- if err != nil {
- return err
- }
-
- // get version from the primary, or error
- if !si.HasPrimary() {
- return fmt.Errorf("no primary in shard %v/%v", keyspace, shard)
- }
- log.Infof("Gathering version for primary %v", topoproto.TabletAliasString(si.PrimaryAlias))
- primaryVersion, err := wr.GetVersion(ctx, si.PrimaryAlias)
- if err != nil {
- return err
- }
-
- // read all the aliases in the shard, that is all tablets that are
- // replicating from the primary
- aliases, err := wr.ts.FindAllTabletAliasesInShard(ctx, keyspace, shard)
- if err != nil {
- return err
- }
-
- // then diff with all replicas
- er := concurrency.AllErrorRecorder{}
- wg := sync.WaitGroup{}
- for _, alias := range aliases {
- if topoproto.TabletAliasEqual(alias, si.PrimaryAlias) {
- continue
- }
+ res, err := wr.VtctldServer().ValidateVersionShard(ctx, &vtctldatapb.ValidateVersionShardRequest{
+ Keyspace: keyspace,
+ Shard: shard,
+ })
- wg.Add(1)
- go wr.diffVersion(ctx, primaryVersion, si.PrimaryAlias, alias, &wg, &er)
- }
- wg.Wait()
- if er.HasErrors() {
- return fmt.Errorf("version diffs: %v", er.Error().Error())
+ if len(res.Results) > 0 {
+ return fmt.Errorf("version diffs: %v", res.Results)
}
- return nil
+ return err
}
// ValidateVersionKeyspace validates all versions are the same in all
diff --git a/go/vt/wrangler/workflow.go b/go/vt/wrangler/workflow.go
index 4a4ca50518a..468e82f080a 100644
--- a/go/vt/wrangler/workflow.go
+++ b/go/vt/wrangler/workflow.go
@@ -144,7 +144,7 @@ func (vrw *VReplicationWorkflow) stateAsString(ws *workflow.State) string {
if !vrw.Exists() {
stateInfo = append(stateInfo, WorkflowStateNotCreated)
} else {
- if !vrw.ts.isPartialMigration { // shard level traffic switching is all or nothing
+ if !ws.IsPartialMigration { // shard level traffic switching is all or nothing
if len(ws.RdonlyCellsNotSwitched) == 0 && len(ws.ReplicaCellsNotSwitched) == 0 && len(ws.ReplicaCellsSwitched) > 0 {
s = "All Reads Switched"
} else if len(ws.RdonlyCellsSwitched) == 0 && len(ws.ReplicaCellsSwitched) == 0 {
@@ -172,21 +172,21 @@ func (vrw *VReplicationWorkflow) stateAsString(ws *workflow.State) string {
}
if ws.WritesSwitched {
stateInfo = append(stateInfo, "Writes Switched")
- } else if vrw.ts.isPartialMigration {
- if ws.WritesPartiallySwitched {
- // For partial migrations, the traffic switching is all or nothing
- // at the shard level, so reads are effectively switched on the
- // shard when writes are switched.
- sourceShards := vrw.ts.SourceShards()
- switchedShards := make([]string, len(sourceShards))
- for i, sourceShard := range sourceShards {
- switchedShards[i] = sourceShard.ShardName()
- }
- stateInfo = append(stateInfo, fmt.Sprintf("Reads partially switched, for shards: %s", strings.Join(switchedShards, ",")))
- stateInfo = append(stateInfo, fmt.Sprintf("Writes partially switched, for shards: %s", strings.Join(switchedShards, ",")))
+ } else if ws.IsPartialMigration {
+ // For partial migrations, the traffic switching is all or nothing
+ // at the shard level, so reads are effectively switched on the
+ // shard when writes are switched.
+ if len(ws.ShardsAlreadySwitched) > 0 && len(ws.ShardsNotYetSwitched) > 0 {
+ stateInfo = append(stateInfo, fmt.Sprintf("Reads partially switched, for shards: %s", strings.Join(ws.ShardsAlreadySwitched, ",")))
+ stateInfo = append(stateInfo, fmt.Sprintf("Writes partially switched, for shards: %s", strings.Join(ws.ShardsAlreadySwitched, ",")))
} else {
- stateInfo = append(stateInfo, "Reads Not Switched")
- stateInfo = append(stateInfo, "Writes Not Switched")
+ if len(ws.ShardsAlreadySwitched) == 0 {
+ stateInfo = append(stateInfo, "Reads Not Switched")
+ stateInfo = append(stateInfo, "Writes Not Switched")
+ } else {
+ stateInfo = append(stateInfo, "All Reads Switched")
+ stateInfo = append(stateInfo, "All Writes Switched")
+ }
}
} else {
stateInfo = append(stateInfo, "Writes Not Switched")
diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go
index ea04baf6569..8c3c3c46690 100644
--- a/go/vt/wrangler/wrangler.go
+++ b/go/vt/wrangler/wrangler.go
@@ -21,6 +21,8 @@ package wrangler
import (
"context"
+ "golang.org/x/sync/semaphore"
+
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/topo"
@@ -53,6 +55,9 @@ type Wrangler struct {
// VExecFunc is a test-only fixture that allows us to short circuit vexec commands.
// DO NOT USE in production code.
VExecFunc func(ctx context.Context, workflow, keyspace, query string, dryRun bool) (map[*topo.TabletInfo]*sqltypes.Result, error)
+ // Limit the number of concurrent background goroutines if needed.
+ // nolint:ignore U1000
+ sem *semaphore.Weighted
}
// New creates a new Wrangler object.
diff --git a/java/client/pom.xml b/java/client/pom.xml
index e54be493892..ab7f5d65600 100644
--- a/java/client/pom.xml
+++ b/java/client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 15.0.0-SNAPSHOT
+ 15.0.5
vitess-client
diff --git a/java/example/pom.xml b/java/example/pom.xml
index 9efc0399b79..e4603ff845c 100644
--- a/java/example/pom.xml
+++ b/java/example/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 15.0.0-SNAPSHOT
+ 15.0.5
vitess-example
diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml
index 5af03fddfc6..637345c23f3 100644
--- a/java/grpc-client/pom.xml
+++ b/java/grpc-client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 15.0.0-SNAPSHOT
+ 15.0.5
vitess-grpc-client
diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml
index 8ce51843a9f..005e7a614fd 100644
--- a/java/jdbc/pom.xml
+++ b/java/jdbc/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 15.0.0-SNAPSHOT
+ 15.0.5
vitess-jdbc
diff --git a/java/pom.xml b/java/pom.xml
index 9515b1aa3b4..15f544012a4 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -11,7 +11,7 @@
io.vitess
vitess-parent
- 15.0.0-SNAPSHOT
+ 15.0.5
pom
Vitess Java Client libraries [Parent]
diff --git a/misc/git/hooks/golangci-lint b/misc/git/hooks/golangci-lint
index 2a270cf308a..361a3f5a8ff 100755
--- a/misc/git/hooks/golangci-lint
+++ b/misc/git/hooks/golangci-lint
@@ -16,7 +16,7 @@
GOLANGCI_LINT=$(command -v golangci-lint >/dev/null 2>&1)
if [ $? -eq 1 ]; then
echo "Downloading golangci-lint..."
- go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2
+ go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2
fi
gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$')
diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto
index e23a642d35e..d27adb83331 100644
--- a/proto/binlogdata.proto
+++ b/proto/binlogdata.proto
@@ -295,6 +295,10 @@ enum VEventType {
VERSION = 17;
LASTPK = 18;
SAVEPOINT = 19;
+ // COPY_COMPLETED is sent when VTGate's VStream copy operation is done.
+ // If a client experiences some disruptions before receiving the event,
+ // the client should restart the copy operation.
+ COPY_COMPLETED = 20;
}
// RowChange represents one row change.
diff --git a/proto/query.proto b/proto/query.proto
index 1e7e533cf44..fb376ed9cc9 100644
--- a/proto/query.proto
+++ b/proto/query.proto
@@ -325,6 +325,14 @@ message ExecuteOptions {
// if the user has created temp tables, Vitess will not reuse plans created for this session in other sessions.
// The current session can still use other sessions cached plans.
bool has_created_temp_tables = 12;
+
+ // WorkloadName specifies the name of the workload as indicated in query directives. This is used for instrumentation
+ // in metrics and tracing spans.
+ string WorkloadName = 15;
+
+ // priority specifies the priority of the query, between 0 and 100. This is leveraged by the transaction
+ // throttler to determine whether, under resource contention, a query should or should not be throttled.
+ string priority = 16;
}
// Field describes a single column returned by a query
diff --git a/proto/replicationdata.proto b/proto/replicationdata.proto
index 536ea2c4d13..c842a6fc6ae 100644
--- a/proto/replicationdata.proto
+++ b/proto/replicationdata.proto
@@ -25,9 +25,8 @@ package replicationdata;
// flavor-specific command and parsed into a Position and fields.
message Status {
string position = 1;
- // 2 used to be io_thread_running. Instead io_state is used.
- // 3 used to be sql_thread_running. Instead sql_state is used.
- reserved 2, 3;
+ bool io_thread_running = 2;
+ bool sql_thread_running = 3;
uint32 replication_lag_seconds = 4;
string source_host = 5;
int32 source_port = 6;
@@ -50,6 +49,7 @@ message Status {
bool has_replication_filters = 22;
bool ssl_allowed = 23;
bool replication_lag_unknown = 24;
+ bool backup_running = 25;
}
// StopReplicationStatus represents the replication status before calling StopReplication, and the replication status collected immediately after
diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto
index 842a71b7392..29ce3901721 100644
--- a/proto/tabletmanagerdata.proto
+++ b/proto/tabletmanagerdata.proto
@@ -474,6 +474,9 @@ message PromoteReplicaResponse {
message BackupRequest {
int64 concurrency = 1;
bool allow_primary = 2;
+
+ // BackupEngine specifies if we want to use a particular backup engine for this backup request
+ optional string backup_engine = 5;
}
message BackupResponse {
@@ -482,6 +485,9 @@ message BackupResponse {
message RestoreFromBackupRequest {
vttime.Time backup_time = 1;
+
+ // AllowedBackupEngines, if present, will filter out any backups taken with engines not included in the list
+ repeated string allowed_backup_engines = 5;
}
message RestoreFromBackupResponse {
diff --git a/proto/vtadmin.proto b/proto/vtadmin.proto
index 66d1a9d27be..5c9c60cb6b0 100644
--- a/proto/vtadmin.proto
+++ b/proto/vtadmin.proto
@@ -64,6 +64,8 @@ service VTAdmin {
rpc GetCellsAliases(GetCellsAliasesRequest) returns (GetCellsAliasesResponse) {};
// GetClusters returns all configured clusters.
rpc GetClusters(GetClustersRequest) returns (GetClustersResponse) {};
+ // GetFullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others
+ rpc GetFullStatus(GetFullStatusRequest) returns (vtctldata.GetFullStatusResponse) {};
// GetGates returns all gates across all the specified clusters.
rpc GetGates(GetGatesRequest) returns (GetGatesResponse) {};
// GetKeyspace returns a keyspace by name in the specified cluster.
@@ -88,6 +90,8 @@ service VTAdmin {
rpc GetTablet(GetTabletRequest) returns (Tablet) {};
// GetTablets returns all tablets across all the specified clusters.
rpc GetTablets(GetTabletsRequest) returns (GetTabletsResponse) {};
+ // GetTopologyPath returns the cell located at the specified path in the topology server.
+ rpc GetTopologyPath(GetTopologyPathRequest) returns (vtctldata.GetTopologyPathResponse){};
// GetVSchema returns a VSchema for the specified keyspace in the specified
// cluster.
rpc GetVSchema(GetVSchemaRequest) returns (VSchema) {};
@@ -148,6 +152,9 @@ service VTAdmin {
// * "orchestrator" here refers to external orchestrator, not the newer,
// Vitess-aware orchestrator, VTOrc.
rpc TabletExternallyPromoted(TabletExternallyPromotedRequest) returns (TabletExternallyPromotedResponse) {};
+ // Validate validates that all nodes in a cluster that are reachable from the global replication graph,
+ // as well as all tablets in discoverable cells, are consistent
+ rpc Validate(ValidateRequest) returns (vtctldata.ValidateResponse) {};
// ValidateKeyspace validates that all nodes reachable from the specified
// keyspace are consistent.
rpc ValidateKeyspace(ValidateKeyspaceRequest) returns (vtctldata.ValidateKeyspaceResponse) {};
@@ -155,9 +162,13 @@ service VTAdmin {
// for shard 0 matches the schema on all of the other tablets in the
// keyspace.
rpc ValidateSchemaKeyspace(ValidateSchemaKeyspaceRequest) returns (vtctldata.ValidateSchemaKeyspaceResponse) {};
- // ValidateVersionKeyspace validates that the version on the primary of
+ // ValidateShard validates that all nodes reachable from the specified shard are consistent.
+ rpc ValidateShard(ValidateShardRequest) returns (vtctldata.ValidateShardResponse) {};
+ // ValidateVersionKeyspace validates that the version on the primary of
// shard 0 matches all of the other tablets in the keyspace.
rpc ValidateVersionKeyspace(ValidateVersionKeyspaceRequest) returns (vtctldata.ValidateVersionKeyspaceResponse) {};
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ rpc ValidateVersionShard(ValidateVersionShardRequest) returns (vtctldata.ValidateVersionShardResponse) {};
// VTExplain provides information on how Vitess plans to execute a
// particular query.
rpc VTExplain(VTExplainRequest) returns (VTExplainResponse) {};
@@ -415,6 +426,11 @@ message GetClustersResponse {
repeated Cluster clusters = 1;
}
+message GetFullStatusRequest {
+ string cluster_id = 1;
+ topodata.TabletAlias alias = 2;
+}
+
message GetGatesRequest {
repeated string cluster_ids = 1;
}
@@ -505,6 +521,11 @@ message GetTabletsResponse {
repeated Tablet tablets = 1;
}
+message GetTopologyPathRequest {
+ string cluster_id = 1;
+ string path = 2;
+}
+
message GetVSchemaRequest {
string cluster_id = 1;
string keyspace = 2;
@@ -808,6 +829,11 @@ message TabletExternallyReparentedRequest {
repeated string cluster_ids = 2;
}
+message ValidateRequest {
+ string cluster_id = 1;
+ bool ping_tablets = 2;
+}
+
message ValidateKeyspaceRequest {
string cluster_id = 1;
string keyspace = 2;
@@ -819,11 +845,24 @@ message ValidateSchemaKeyspaceRequest {
string keyspace = 2;
}
+message ValidateShardRequest {
+ string cluster_id = 1;
+ string keyspace = 2;
+ string shard = 3;
+ bool ping_tablets = 4;
+}
+
message ValidateVersionKeyspaceRequest {
string cluster_id = 1;
string keyspace = 2;
}
+message ValidateVersionShardRequest {
+ string cluster_id = 1;
+ string keyspace = 2;
+ string shard = 3;
+}
+
message VTExplainRequest {
string cluster = 1;
string keyspace = 2;
diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto
index 3e3cab48e78..3f5108b84d6 100644
--- a/proto/vtctldata.proto
+++ b/proto/vtctldata.proto
@@ -268,6 +268,9 @@ message BackupRequest {
// Concurrency specifies the number of compression/checksum jobs to run
// simultaneously.
uint64 concurrency = 3;
+
+ // BackupEngine specifies if we want to use a particular backup engine for this backup request
+ optional string backup_engine = 6;
}
message BackupResponse {
@@ -447,6 +450,9 @@ message EmergencyReparentShardRequest {
// PreventCrossCellPromotion is used to only promote the new primary from the same cell
// as the failed primary.
bool prevent_cross_cell_promotion = 6;
+ // ExpectedPrimary is the optional alias we expect to be the current primary in order for
+ // the reparent operation to succeed.
+ topodata.TabletAlias expected_primary = 8;
}
message EmergencyReparentShardResponse {
@@ -723,6 +729,23 @@ message GetTabletsResponse {
repeated topodata.Tablet tablets = 1;
}
+message GetTopologyPathRequest {
+ string path = 1;
+}
+
+message GetTopologyPathResponse {
+ TopologyCell cell = 1;
+}
+
+message TopologyCell {
+ string name = 1;
+ string path = 2;
+ // Data is the file contents of the cell located at path.
+ // It is only populated if the cell is a terminal node.
+ string data = 3;
+ repeated string children = 4;
+}
+
message GetVSchemaRequest {
string keyspace = 1;
}
@@ -790,6 +813,9 @@ message PlannedReparentShardRequest {
// WaitReplicasTimeout time to catch up before the reparent, and an additional
// WaitReplicasTimeout time to catch up after the reparent.
vttime.Duration wait_replicas_timeout = 5;
+ // ExpectedPrimary is the optional alias we expect to be the current primary in order for
+ // the reparent operation to succeed.
+ topodata.TabletAlias expected_primary = 8;
}
message PlannedReparentShardResponse {
@@ -942,6 +968,9 @@ message RestoreFromBackupRequest {
// BackupTime, if set, will use the backup taken most closely at or before
// this time. If nil, the latest backup will be restored on the tablet.
vttime.Time backup_time = 2;
+
+ // AllowedBackupEngines, if present, will filter out any backups taken with engines not included in the list
+ repeated string allowed_backup_engines = 6;
}
message RestoreFromBackupResponse {
@@ -1225,6 +1254,15 @@ message ValidateVersionKeyspaceResponse {
map results_by_shard = 2;
}
+message ValidateVersionShardRequest {
+ string keyspace = 1;
+ string shard = 2;
+}
+
+message ValidateVersionShardResponse {
+ repeated string results = 1;
+}
+
message ValidateVSchemaRequest {
string keyspace = 1;
repeated string shards = 2;
diff --git a/proto/vtctlservice.proto b/proto/vtctlservice.proto
index eff0cac4945..de69443eee4 100644
--- a/proto/vtctlservice.proto
+++ b/proto/vtctlservice.proto
@@ -139,6 +139,8 @@ service Vtctld {
rpc GetTablet(vtctldata.GetTabletRequest) returns (vtctldata.GetTabletResponse) {};
// GetTablets returns tablets, optionally filtered by keyspace and shard.
rpc GetTablets(vtctldata.GetTabletsRequest) returns (vtctldata.GetTabletsResponse) {};
+ // GetTopologyPath returns the topology cell at a given path.
+ rpc GetTopologyPath(vtctldata.GetTopologyPathRequest) returns (vtctldata.GetTopologyPathResponse) {};
// GetVersion returns the version of a tablet from its debug vars.
rpc GetVersion(vtctldata.GetVersionRequest) returns (vtctldata.GetVersionResponse) {};
// GetVSchema returns the vschema for a keyspace.
@@ -285,6 +287,8 @@ service Vtctld {
rpc ValidateShard(vtctldata.ValidateShardRequest) returns (vtctldata.ValidateShardResponse) {};
// ValidateVersionKeyspace validates that the version on the primary of shard 0 matches all of the other tablets in the keyspace.
rpc ValidateVersionKeyspace(vtctldata.ValidateVersionKeyspaceRequest) returns (vtctldata.ValidateVersionKeyspaceResponse) {};
+ // ValidateVersionShard validates that the version on the primary matches all of the replicas.
+ rpc ValidateVersionShard(vtctldata.ValidateVersionShardRequest) returns (vtctldata.ValidateVersionShardResponse) {};
// ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences.
rpc ValidateVSchema(vtctldata.ValidateVSchemaRequest) returns (vtctldata.ValidateVSchemaResponse) {};
}
diff --git a/proto/vtgate.proto b/proto/vtgate.proto
index 55e94221e5f..6431f58f727 100644
--- a/proto/vtgate.proto
+++ b/proto/vtgate.proto
@@ -283,6 +283,8 @@ message VStreamFlags {
// if specified, these cells (comma-separated) are used to pick source tablets from.
// defaults to the cell of the vtgate serving the VStream API.
string cells = 4;
+ string cell_preference = 5;
+ string tablet_order = 6;
}
// VStreamRequest is the payload for VStream.
diff --git a/test.go b/test.go
index fd0f5fe3464..0914c79988d 100755
--- a/test.go
+++ b/test.go
@@ -77,7 +77,7 @@ For example:
// Flags
var (
flavor = flag.String("flavor", "mysql57", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). Available flavors: all,"+flavors)
- bootstrapVersion = flag.String("bootstrap-version", "11", "the version identifier to use for the docker images")
+ bootstrapVersion = flag.String("bootstrap-version", "21", "the version identifier to use for the docker images")
runCount = flag.Int("runs", 1, "run each test this many times")
retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests")
logPass = flag.Bool("log-pass", false, "log test output even if it passes")
@@ -659,9 +659,11 @@ type TestStats struct {
func sendStats(values url.Values) {
if *remoteStats != "" {
log.Printf("Sending remote stats to %v", *remoteStats)
- if _, err := http.PostForm(*remoteStats, values); err != nil {
+ resp, err := http.PostForm(*remoteStats, values)
+ if err != nil {
log.Printf("Can't send remote stats: %v", err)
}
+ defer resp.Body.Close()
}
}
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index c39752d93fc..f71c86d1f75 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -40,11 +40,11 @@ type mysqlVersions []mysqlVersion
var (
defaultMySQLVersions = []mysqlVersion{defaultMySQLVersion}
- allMySQLVersions = []mysqlVersion{mysql57, mysql80}
+ allMySQLVersions = []mysqlVersion{mysql80}
)
var (
- unitTestDatabases = []mysqlVersion{mysql57, mysql80, mariadb103}
+ unitTestDatabases = []mysqlVersion{mysql80}
)
const (
@@ -79,7 +79,6 @@ var (
"21",
"22",
"mysql_server_vault",
- "26",
"vstream_failover",
"vstream_stoponreshard_true",
"vstream_stoponreshard_false",
@@ -115,6 +114,7 @@ var (
"vtgate_vschema",
"vtgate_queries",
"vtgate_schema_tracker",
+ "vtorc",
"xb_recovery",
"mysql80",
"vreplication_across_db_versions",
@@ -125,11 +125,10 @@ var (
"schemadiff_vrepl",
"topo_connection_cache",
"vtgate_partial_keyspace",
+ "vttablet_prscomplex",
}
- clusterSelfHostedList = []string{
- "vtorc",
- }
+ clusterSelfHostedList = []string{}
clusterDockerList = []string{}
clustersRequiringXtraBackup = []string{
"xb_backup",
diff --git a/test/config.json b/test/config.json
index da32b876ded..f9e27153c99 100644
--- a/test/config.json
+++ b/test/config.json
@@ -125,7 +125,7 @@
"Manual": false,
"Shard": "vtbackup_transform",
"RetryMax": 1,
- "Tags": ["upgrade_downgrade_backups"]
+ "Tags": [""]
},
"backup_transform_mysqlctld": {
"File": "unused.go",
@@ -261,7 +261,7 @@
},
"onlineddl_ghost": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/ghost"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/ghost", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_ghost",
@@ -270,7 +270,7 @@
},
"onlineddl_vrepl": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_vrepl",
@@ -306,7 +306,7 @@
},
"onlineddl_revert": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/revert"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/revert", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_revert",
@@ -315,7 +315,7 @@
},
"onlineddl_revertible": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/revertible"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/revertible", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_revertible",
@@ -324,7 +324,7 @@
},
"onlineddl_declarative": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/declarative"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/declarative", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_declarative",
@@ -333,7 +333,7 @@
},
"onlineddl_singleton": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/singleton"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/singleton", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_singleton",
@@ -342,7 +342,7 @@
},
"onlineddl_scheduler": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/scheduler"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/scheduler", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "onlineddl_scheduler",
@@ -351,7 +351,7 @@
},
"schemadiff_vrepl": {
"File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/schemadiff/vrepl"],
+ "Args": ["vitess.io/vitess/go/test/endtoend/schemadiff/vrepl", "-timeout", "30m"],
"Command": [],
"Manual": false,
"Shard": "schemadiff_vrepl",
@@ -369,17 +369,6 @@
"site_test"
]
},
- "pitrtls": {
- "File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitrtls"],
- "Command": [],
- "Manual": false,
- "Shard": "26",
- "RetryMax": 1,
- "Tags": [
- "site_test"
- ]
- },
"recovery": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"],
@@ -549,6 +538,15 @@
"RetryMax": 2,
"Tags": []
},
+ "vtgate_queries_derived": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/derived"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "vtgate_queries",
+ "RetryMax": 1,
+ "Tags": []
+ },
"vtgate_queries_aggregation": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/aggregation"],
@@ -720,15 +718,6 @@
"RetryMax": 1,
"Tags": []
},
- "vtgate_mysql80_derived": {
- "File": "unused.go",
- "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/mysql80/derived"],
- "Command": [],
- "Manual": false,
- "Shard": "mysql80",
- "RetryMax": 1,
- "Tags": []
- },
"vtgate_sequence": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/sequence"],
@@ -801,6 +790,15 @@
"RetryMax": 1,
"Tags": []
},
+ "vtgate_transaction_restart": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/transaction/restart"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "vtgate_transaction",
+ "RetryMax": 1,
+ "Tags": []
+ },
"vtgate_transaction_rollback": {
"File": "unused.go",
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/transaction/rollback"],
@@ -1187,6 +1185,24 @@
"Shard": "topo_connection_cache",
"RetryMax": 1,
"Tags": []
+ },
+ "prscomplex": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/reparent/prscomplex"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "vttablet_prscomplex",
+ "RetryMax": 1,
+ "Tags": [""]
+ },
+ "prssettingspool": {
+ "File": "unused.go",
+ "Args": ["vitess.io/vitess/go/test/endtoend/reparent/prssettingspool"],
+ "Command": [],
+ "Manual": false,
+ "Shard": "vttablet_prscomplex",
+ "RetryMax": 1,
+ "Tags": [""]
}
}
}
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl
index 1284931c53b..8d410ca2bd8 100644
--- a/test/templates/cluster_endtoend_test.tpl
+++ b/test/templates/cluster_endtoend_test.tpl
@@ -4,30 +4,26 @@ concurrency:
group: format('{0}-{1}', ${{"{{"}} github.ref {{"}}"}}, '{{.Name}}')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{`{{ github.event.pull_request.head.sha }}`}}"
-
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: {{"${{ secrets.GH_ACCESS_TOKEN }}"}}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -41,7 +37,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -50,13 +47,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -69,6 +69,8 @@ jobs:
- name: Get dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
+ {{if .InstallXtraBackup}}
+
# Setup Percona Server for MySQL 8.0
sudo apt-get update
sudo apt-get install -y lsb-release gnupg2 curl
@@ -79,6 +81,21 @@ jobs:
# Install everything else we need, and configure
sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils
+
+ {{else}}
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils
+
+ {{end}}
+
sudo service mysql stop
sudo service etcd stop
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
@@ -90,7 +107,7 @@ jobs:
{{if .InstallXtraBackup}}
- sudo apt-get install percona-xtrabackup-80 lz4
+ sudo apt-get install -y percona-xtrabackup-80 lz4
{{end}}
@@ -103,59 +120,42 @@ jobs:
{{end}}
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- {{if .LimitResourceUsage}}
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql80.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_in_core_file=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
- {{end}}
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}} | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ {{if .LimitResourceUsage}}
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+ {{end}}
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}}
diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl
index 6914c7bedc8..b898328d1fb 100644
--- a/test/templates/cluster_endtoend_test_docker.tpl
+++ b/test/templates/cluster_endtoend_test_docker.tpl
@@ -4,22 +4,23 @@ on: [push, pull_request]
jobs:
build:
name: Run endtoend tests on {{.Name}}
- {{if .Ubuntu20}}runs-on: ubuntu-20.04{{else}}runs-on: ubuntu-latest{{end}}
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: {{"${{ secrets.GH_ACCESS_TOKEN }}"}}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -33,7 +34,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -42,9 +44,12 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -53,6 +58,14 @@ jobs:
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: |
- go run test.go -docker=true --follow -shard {{.Shard}}
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ go run test.go -docker=true --follow -shard {{.Shard}}
diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl
index 815d5aa47d8..9741b789ef0 100644
--- a/test/templates/cluster_endtoend_test_mysql57.tpl
+++ b/test/templates/cluster_endtoend_test_mysql57.tpl
@@ -4,11 +4,8 @@ concurrency:
group: format('{0}-{1}', ${{"{{"}} github.ref {{"}}"}}, '{{.Name}}')
cancel-in-progress: true
-env:
- LAUNCHABLE_ORGANIZATION: "vitess"
- LAUNCHABLE_WORKSPACE: "vitess-app"
- GITHUB_PR_HEAD_SHA: "${{`{{ github.event.pull_request.head.sha }}`}}"
{{if .InstallXtraBackup}}
+#env:
# This is used if we need to pin the xtrabackup version used in tests.
# If this is NOT set then the latest version available will be used.
#XTRABACKUP_VERSION: "2.4.24-1"
@@ -17,22 +14,23 @@ env:
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: {{"${{ secrets.GH_ACCESS_TOKEN }}"}}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -46,7 +44,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -55,13 +54,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -89,11 +91,11 @@ jobs:
sudo rm -rf /etc/mysql
# Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
@@ -112,6 +114,7 @@ jobs:
wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb"
sudo apt-get install -y gnupg2
sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb"
+ sudo percona-release enable-only pxb-24
sudo apt-get update
if [[ -n $XTRABACKUP_VERSION ]]; then
debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb"
@@ -132,58 +135,41 @@ jobs:
{{end}}
- - name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- run: |
- # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
- pip3 install --user launchable~=1.0 > /dev/null
-
- # verify that launchable setup is all correct.
- launchable verify || true
-
- # Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
-
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 45
- run: |
- # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
- # which musn't be more than 107 characters long.
- export VTDATAROOT="/tmp/"
- source build.env
-
- set -x
-
- {{if .LimitResourceUsage}}
- # Increase our local ephemeral port range as we could exhaust this
- sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
- # Increase our open file descriptor limit as we could hit this
- ulimit -n 65536
- cat <<-EOF>>./config/mycnf/mysql57.cnf
- innodb_buffer_pool_dump_at_shutdown=OFF
- innodb_buffer_pool_load_at_startup=OFF
- innodb_buffer_pool_size=64M
- innodb_doublewrite=OFF
- innodb_flush_log_at_trx_commit=0
- innodb_flush_method=O_DIRECT
- innodb_numa_interleave=ON
- innodb_adaptive_hash_index=OFF
- sync_binlog=0
- sync_relay_log=0
- performance_schema=OFF
- slow-query-log=OFF
- EOF
- {{end}}
-
- # run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}} | tee -a output.txt | go-junit-report -set-exit-code > report.xml
-
- - name: Print test output and Record test result in launchable
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
- run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
-
- # print test output
- cat output.txt
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 45
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -x
+
+ {{if .LimitResourceUsage}}
+ # Increase our local ephemeral port range as we could exhaust this
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 61999"
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql57.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+ {{end}}
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}}
diff --git a/test/templates/cluster_endtoend_test_self_hosted.tpl b/test/templates/cluster_endtoend_test_self_hosted.tpl
index 36f115ca18a..7d96cb0b41f 100644
--- a/test/templates/cluster_endtoend_test_self_hosted.tpl
+++ b/test/templates/cluster_endtoend_test_self_hosted.tpl
@@ -7,22 +7,23 @@ concurrency:
jobs:
build:
name: Run endtoend tests on {{.Name}}
- runs-on: self-hosted
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: {{"${{ secrets.GH_ACCESS_TOKEN }}"}}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -36,7 +37,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -50,8 +52,17 @@ jobs:
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- timeout-minutes: 30
- run: docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard {{.Shard}} -- -- --keep-data=true'
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard {{.Shard}} -- -- --keep-data=true'
- name: Print Volume Used
if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl
index 29fac20f450..c83ba3f7520 100644
--- a/test/templates/dockerfile.tpl
+++ b/test/templates/dockerfile.tpl
@@ -1,4 +1,4 @@
-ARG bootstrap_version=11
+ARG bootstrap_version=21
ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}"
FROM "${image}"
@@ -15,6 +15,7 @@ RUN wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_
RUN apt-get update
RUN apt-get install -y gnupg2
RUN dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+RUN percona-release enable-only pxb-24
RUN apt-get update
RUN apt-get install -y percona-xtrabackup-24
{{end}}
diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl
index 440fec7cf12..43d9e1caeb4 100644
--- a/test/templates/unit_test.tpl
+++ b/test/templates/unit_test.tpl
@@ -6,22 +6,23 @@ concurrency:
jobs:
test:
- runs-on: ubuntu-20.04
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: {{"${{ secrets.GH_ACCESS_TOKEN }}"}}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -35,7 +36,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -44,9 +46,16 @@ jobs:
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: 1.18.5
+ go-version: 1.21.10
+
+ - name: Setup github.com/slackhq/vitess-additions access token
+ run: git config --global url.https://$GH_ACCESS_TOKEN@github.com/.insteadOf https://github.com/
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
+ uses: actions/setup-python@v4
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
@@ -73,12 +82,12 @@ jobs:
{{if (eq .Platform "mysql57")}}
# Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# mysql57
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
- # Bionic packages are still compatible for Focal since there's no MySQL 5.7
- # packages for Focal.
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
+ # Bionic packages are still compatible for Jammy since there's no MySQL 5.7
+ # packages for Jammy.
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
@@ -89,10 +98,10 @@ jobs:
{{if (eq .Platform "mysql80")}}
# Get key to latest MySQL repo
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
# mysql80
- wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
@@ -131,6 +140,25 @@
     - name: Run test
       if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
-      timeout-minutes: 30
+      uses: nick-fields/retry@v2
+      with:
+        timeout_minutes: 30
+        max_attempts: 3
+        retry_on: error
+        command: |
+          set -exo pipefail
+          # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+          export VTDATAROOT="/tmp/"
+
+          export NOVTADMINBUILD=1
+          eatmydata -- make unit_test
+
+      - name: Print test output and Record test result in launchable
+        if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always()
        run: |
-        eatmydata -- make unit_test
+        # send recorded tests to launchable
+        launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+
+        # print test output
+        cat output.txt
diff --git a/test/templates/unit_test_self_hosted.tpl b/test/templates/unit_test_self_hosted.tpl
index a8a2e351c66..bdc40f2c861 100644
--- a/test/templates/unit_test_self_hosted.tpl
+++ b/test/templates/unit_test_self_hosted.tpl
@@ -6,22 +6,23 @@ concurrency:
jobs:
test:
- runs-on: self-hosted
+ runs-on:
+ group: vitess-ubuntu20
+ env:
+ GOPRIVATE: github.com/slackhq/vitess-addons
+ GH_ACCESS_TOKEN: {{"${{ secrets.GH_ACCESS_TOKEN }}"}}
steps:
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
- if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then
- skip='true'
- fi
echo Skip ${skip}
- echo "::set-output name=skip-workflow::${skip}"
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
@@ -35,7 +36,8 @@ jobs:
- 'test.go'
- 'Makefile'
- 'build.env'
- - 'go.[sumod]'
+ - 'go.sum'
+ - 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
@@ -48,8 +50,18 @@ jobs:
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
- timeout-minutes: 30
- run: docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'make unit_test'
+ uses: nick-fields/retry@v2
+ with:
+ timeout_minutes: 30
+ max_attempts: 3
+ retry_on: error
+ command: |
+ set -exo pipefail
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+          # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'NOVTADMINBUILD=1 make unit_test'
- name: Print Volume Used
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/tools/all_test_for_coverage.sh b/tools/all_test_for_coverage.sh
index 07fff9f40cb..7b154c8959f 100755
--- a/tools/all_test_for_coverage.sh
+++ b/tools/all_test_for_coverage.sh
@@ -25,7 +25,7 @@ source build.env
make tools
make build
echo "--------- executing unit testcases ---------"
-packages_with_all_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/... | sort)
+packages_with_all_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/... | sort)
all_except_endtoend_tests=$(echo "$packages_with_all_tests" | grep -v "endtoend" | cut -d" " -f1 )
counter=0
diff --git a/tools/check_make_sizegen.sh b/tools/check_make_sizegen.sh
index 787ed43da49..8443c6d8e44 100755
--- a/tools/check_make_sizegen.sh
+++ b/tools/check_make_sizegen.sh
@@ -1,49 +1,22 @@
#!/bin/bash
+
#
# Validate that the current version of the generated cache_size files match the output
# generated by sizegen.
#
-# This is used in Travis to verify that the currently committed version was
-# generated with the proper cache_size files.
source build.env
-TMP="/tmp/cached_size.$$.go"
-ALL_FILES=$(find . -name "cached_size.go")
-
-set +e
-
-goimports -local vitess.io/vitess -w $ALL_FILES
-
-for SRC in $ALL_FILES
-do
- TMP="/tmp/"$(echo "$SRC" | sed 's/\//_/g' | sed "s/cached_size.go/cached_size_$$.go/g")
- mv "$SRC" "$TMP"
-done
+first_output=$(git status --porcelain)
make sizegen
-STATUS=0
-
-for SRC in $ALL_FILES
-do
- TMP="/tmp/"$(echo "$SRC" | sed 's/\//_/g' | sed "s/cached_size.go/cached_size_$$.go/g")
-
- if [ ! -f "$SRC" ]; then
- mv "$TMP" "$SRC"
- continue
- fi
-
- if ! diff -q "$SRC" "$TMP" > /dev/null ; then
- echo "ERROR: Regenerated file for $SRC does not match the current version:"
- diff -u "$SRC" "$TMP"
-
- echo
- echo "Please re-run 'make sizegen' to generate."
- STATUS=1
- fi
- mv "$TMP" "$SRC"
-done
+second_output=$(git status --porcelain)
-exit $STATUS
+diff=$(diff <( echo "$first_output") <( echo "$second_output"))
+if [[ "$diff" != "" ]]; then
+ echo "ERROR: Regenerated cached_size files do not match the current version."
+ echo -e "List of files containing differences:\n$diff"
+ exit 1
+fi
diff --git a/tools/do_release.sh b/tools/do_release.sh
index 1a269dfbc7a..5b89b0f5819 100755
--- a/tools/do_release.sh
+++ b/tools/do_release.sh
@@ -113,6 +113,12 @@ function updateVitessExamples () {
rm -f $(find -E $ROOT/examples/compose/**/* -regex ".*.(go|yml).bak")
}
+# First argument is the Release Version the docker release script should be set to (for instance: v15.0.0)
+function updateDockerReleaseScript () {
+ sed -i.bak -E "s/vt_base_version=.*/vt_base_version='v$1'/g" $ROOT/docker/release.sh
+ rm -f $ROOT/docker/release.sh.bak
+}
+
# Preparing and tagging the release
function doRelease () {
checkoutNewBranch "tag"
@@ -131,6 +137,7 @@ function doRelease () {
# Preparing the release commit
updateVitessExamples $RELEASE_VERSION $VTOP_VERSION
updateJava $RELEASE_VERSION
+ updateDockerReleaseScript $RELEASE_VERSION
updateVersionGo $RELEASE_VERSION
## Create the commit for this release and tag it
@@ -152,6 +159,7 @@ function doBackToDevMode () {
# Preparing the "dev mode" commit
updateJava $DEV_VERSION
+ updateDockerReleaseScript $DEV_VERSION
updateVersionGo $DEV_VERSION
git add --all
diff --git a/tools/e2e_test_cluster.sh b/tools/e2e_test_cluster.sh
index 991cd29ce53..da9d3684df1 100755
--- a/tools/e2e_test_cluster.sh
+++ b/tools/e2e_test_cluster.sh
@@ -22,7 +22,7 @@
source build.env
-packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort)
+packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort)
cluster_tests=$(echo "$packages_with_tests" | grep -E "go/test/endtoend" | cut -d" " -f1)
diff --git a/tools/e2e_test_race.sh b/tools/e2e_test_race.sh
index 32374a25c17..b072e1261e2 100755
--- a/tools/e2e_test_race.sh
+++ b/tools/e2e_test_race.sh
@@ -32,15 +32,17 @@ trap '[ -f "$temp_log_file" ] && rm $temp_log_file' EXIT
# All endtoend Go packages with test files.
# Output per line: *
-packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort)
+packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort)
packages_with_tests=$(echo "$packages_with_tests" | grep -vE "go/test/endtoend" | cut -d" " -f1)
# endtoend tests should be in a directory called endtoend
all_e2e_tests=$(echo "$packages_with_tests" | cut -d" " -f1)
+set -exo pipefail
+
# Run all endtoend tests.
echo "$all_e2e_tests" | xargs go test $VT_GO_PARALLEL -race 2>&1 | tee $temp_log_file
-if [ ${PIPESTATUS[0]} -ne 0 ]; then
+if [ ${PIPESTATUS[1]} -ne 0 ]; then
if grep "WARNING: DATA RACE" -q $temp_log_file; then
echo
echo "ERROR: go test -race found a data race. See log above."
diff --git a/tools/e2e_test_runner.sh b/tools/e2e_test_runner.sh
index dc2edbf0e59..1fc5c2cb558 100755
--- a/tools/e2e_test_runner.sh
+++ b/tools/e2e_test_runner.sh
@@ -38,7 +38,7 @@ fi
# All Go packages with test files.
# Output per line: *
-packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort)
+packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/.../endtoend/... | sort)
# Flaky tests have the suffix "_flaky_test.go".
all_except_flaky_and_cluster_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_test\.go" | grep -vE "go/test/endtoend" | cut -d" " -f1)
diff --git a/tools/get_previous_release.sh b/tools/get_previous_release.sh
index 64988099c56..39cf1e58815 100755
--- a/tools/get_previous_release.sh
+++ b/tools/get_previous_release.sh
@@ -18,27 +18,11 @@
# into the release folder (app) for checkin. Prior to running this script,
# bootstrap.sh and bootstrap_web.sh should already have been run.
-# github.base_ref $1
-target_release=""
-
-base_release_branch=$(echo "$1" | grep -E 'release-[0-9]*.0$')
-if [ "$base_release_branch" == "" ]; then
- base_release_branch=$(echo "$2" | grep -E 'release-[0-9]*.0$')
-fi
-if [ "$base_release_branch" != "" ]; then
- major_release=$(echo "$base_release_branch" | sed 's/release-*//' | sed 's/\.0//')
- target_major_release=$((major_release-1))
- target_release_number=$(git show-ref --tags | grep -E 'refs/tags/v[0-9]*.[0-9]*.[0-9]*$' | sed 's/[a-z0-9]* refs\/tags\/v//' | awk -v FS=. -v RELEASE="$target_major_release" '{if ($1 == RELEASE) print; }' | sort -nr | head -n1)
- target_release="v$target_release_number"
-else
- target_major_release=$(git show-ref | grep -E 'refs/remotes/origin/release-[0-9]*\.0$' | sed 's/[a-z0-9]* refs\/remotes\/origin\/release-//' | sed 's/\.0//' | sort -nr | head -n1)
- target_release_number=$(git show-ref --tags | grep -E 'refs/tags/v[0-9]*.[0-9]*.[0-9]*$' | sed 's/[a-z0-9]* refs\/tags\/v//' | awk -v FS=. -v RELEASE="$target_major_release" '{if ($1 == RELEASE) print; }' | sort -nr | head -n1)
- target_release="v$target_release_number"
- if [ -z "$target_release_number" ]
- then
- target_release="release-$target_major_release.0"
- fi
-fi
-
-echo "$target_release"
\ No newline at end of file
+# This script expects release names to follow the upstream release naming
+# scheme, but Slack has our own naming scheme. Updating the logic to
+# parse Slack release names became a rabbit hole I want to defer.
+#
+# I will hardcode the previous Slack release here because it is static.
+#
+echo slack-vitess-r14.0.5
diff --git a/tools/make-release-packages.sh b/tools/make-release-packages.sh
index 36450530c9e..31ddd6cc96a 100755
--- a/tools/make-release-packages.sh
+++ b/tools/make-release-packages.sh
@@ -42,6 +42,9 @@ done;
# Copy remaining files, preserving date/permissions
# But resolving symlinks
cp -rpfL examples "${RELEASE_DIR}"
+# copy vtadmin as well
+mkdir -p "${RELEASE_DIR}"/web/vtadmin
+cp -rpfL web/vtadmin "${RELEASE_DIR}"/web
echo "Follow the installation instructions at: https://vitess.io/docs/get-started/local/" > "${RELEASE_DIR}"/examples/README.md
diff --git a/tools/rowlog/rowlog.go b/tools/rowlog/rowlog.go
index 9593e88d652..369cc68b5db 100644
--- a/tools/rowlog/rowlog.go
+++ b/tools/rowlog/rowlog.go
@@ -378,7 +378,18 @@ func getFlavor(ctx context.Context, server, keyspace string) string {
}
func getTablet(ctx context.Context, ts *topo.Server, cells []string, keyspace string) string {
- picker, err := discovery.NewTabletPicker(ts, cells, keyspace, "0", "primary")
+ picker, err := discovery.NewTabletPicker(
+ ctx,
+ ts,
+ cells,
+ "",
+ keyspace,
+ "0",
+ "primary",
+ discovery.TabletPickerOptions{
+ CellPreference: "OnlySpecified",
+ },
+ )
if err != nil {
return ""
}
diff --git a/tools/unit_test_race.sh b/tools/unit_test_race.sh
index 320f220ade4..4cec1f365a9 100755
--- a/tools/unit_test_race.sh
+++ b/tools/unit_test_race.sh
@@ -23,7 +23,7 @@ fi
# All Go packages with test files.
# Output per line: *
-packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/... | sort)
+packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/... | sort)
# exclude end to end tests
packages_to_test=$(echo "$packages_with_tests" | cut -d" " -f1 | grep -v "endtoend")
diff --git a/tools/unit_test_runner.sh b/tools/unit_test_runner.sh
index aedb35de3c8..70e7303792b 100755
--- a/tools/unit_test_runner.sh
+++ b/tools/unit_test_runner.sh
@@ -52,7 +52,7 @@ esac
# All Go packages with test files.
# Output per line: *
-packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}' ./go/... | sort)
+packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/... | sort)
# Flaky tests have the suffix "_flaky_test.go".
# Exclude endtoend tests
diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json
index 81197195151..6437ad678a6 100644
--- a/web/vtadmin/package-lock.json
+++ b/web/vtadmin/package-lock.json
@@ -28,6 +28,7 @@
"query-string": "^6.14.0",
"react": "^17.0.1",
"react-dom": "^17.0.1",
+ "react-flow-renderer": "^10.3.17",
"react-query": "^3.5.9",
"react-router-dom": "^5.2.0",
"react-scripts": "5.0.1",
@@ -1811,9 +1812,9 @@
}
},
"node_modules/@babel/runtime": {
- "version": "7.16.5",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.16.5.tgz",
- "integrity": "sha512-TXWihFIS3Pyv5hzR7j6ihmeLkZfrXGxAr5UfSl8CHf+6q/wpiYDkUau0czckpYG8QmnCIuPpdLtuA9VmuGGyMA==",
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.19.0.tgz",
+ "integrity": "sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==",
"dependencies": {
"regenerator-runtime": "^0.13.4"
},
@@ -4226,6 +4227,228 @@
"integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==",
"dev": true
},
+ "node_modules/@types/d3": {
+ "version": "7.4.0",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.0.tgz",
+ "integrity": "sha512-jIfNVK0ZlxcuRDKtRS/SypEyOQ6UHaFQBKv032X45VvxSJ6Yi5G9behy9h6tNTHTDGh5Vq+KbmBjUWLgY4meCA==",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/d3-axis": "*",
+ "@types/d3-brush": "*",
+ "@types/d3-chord": "*",
+ "@types/d3-color": "*",
+ "@types/d3-contour": "*",
+ "@types/d3-delaunay": "*",
+ "@types/d3-dispatch": "*",
+ "@types/d3-drag": "*",
+ "@types/d3-dsv": "*",
+ "@types/d3-ease": "*",
+ "@types/d3-fetch": "*",
+ "@types/d3-force": "*",
+ "@types/d3-format": "*",
+ "@types/d3-geo": "*",
+ "@types/d3-hierarchy": "*",
+ "@types/d3-interpolate": "*",
+ "@types/d3-path": "*",
+ "@types/d3-polygon": "*",
+ "@types/d3-quadtree": "*",
+ "@types/d3-random": "*",
+ "@types/d3-scale": "*",
+ "@types/d3-scale-chromatic": "*",
+ "@types/d3-selection": "*",
+ "@types/d3-shape": "*",
+ "@types/d3-time": "*",
+ "@types/d3-time-format": "*",
+ "@types/d3-timer": "*",
+ "@types/d3-transition": "*",
+ "@types/d3-zoom": "*"
+ }
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.0.3.tgz",
+ "integrity": "sha512-Reoy+pKnvsksN0lQUlcH6dOGjRZ/3WRwXR//m+/8lt1BXeI4xyaUZoqULNjyXXRuh0Mj4LNpkCvhUpQlY3X5xQ=="
+ },
+ "node_modules/@types/d3-axis": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.1.tgz",
+ "integrity": "sha512-zji/iIbdd49g9WN0aIsGcwcTBUkgLsCSwB+uH+LPVDAiKWENMtI3cJEWt+7/YYwelMoZmbBfzA3qCdrZ2XFNnw==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-brush": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.1.tgz",
+ "integrity": "sha512-B532DozsiTuQMHu2YChdZU0qsFJSio3Q6jmBYGYNp3gMDzBmuFFgPt9qKA4VYuLZMp4qc6eX7IUFUEsvHiXZAw==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-chord": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.1.tgz",
+ "integrity": "sha512-eQfcxIHrg7V++W8Qxn6QkqBNBokyhdWSAS73AbkbMzvLQmVVBviknoz2SRS/ZJdIOmhcmmdCRE/NFOm28Z1AMw=="
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-HKuicPHJuvPgCD+np6Se9MQvS6OCbJmOjGvylzMJRlDwUXjKTTXs6Pwgk79O09Vj/ho3u1ofXnhFOaEWWPrlwA=="
+ },
+ "node_modules/@types/d3-contour": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.1.tgz",
+ "integrity": "sha512-C3zfBrhHZvrpAAK3YXqLWVAGo87A4SvJ83Q/zVJ8rFWJdKejUnDYaWZPkA8K84kb2vDA/g90LTQAz7etXcgoQQ==",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-delaunay": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.1.tgz",
+ "integrity": "sha512-tLxQ2sfT0p6sxdG75c6f/ekqxjyYR0+LwPrsO1mbC9YDBzPJhs2HbJJRrn8Ez1DBoHRo2yx7YEATI+8V1nGMnQ=="
+ },
+ "node_modules/@types/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-NhxMn3bAkqhjoxabVJWKryhnZXXYYVQxaBnbANu0O94+O/nX9qSjrA1P1jbAQJxJf+VC72TxDX/YJcKue5bRqw=="
+ },
+ "node_modules/@types/d3-drag": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.1.tgz",
+ "integrity": "sha512-o1Va7bLwwk6h03+nSM8dpaGEYnoIG19P0lKqlic8Un36ymh9NSkNFX1yiXMKNMx8rJ0Kfnn2eovuFaL6Jvj0zA==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-dsv": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.0.tgz",
+ "integrity": "sha512-o0/7RlMl9p5n6FQDptuJVMxDf/7EDEv2SYEO/CwdG2tr1hTfUVi0Iavkk2ax+VpaQ/1jVhpnj5rq1nj8vwhn2A=="
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.0.tgz",
+ "integrity": "sha512-aMo4eaAOijJjA6uU+GIeW018dvy9+oH5Y2VPPzjjfxevvGQ/oRDs+tfYC9b50Q4BygRR8yE2QCLsrT0WtAVseA=="
+ },
+ "node_modules/@types/d3-fetch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.1.tgz",
+ "integrity": "sha512-toZJNOwrOIqz7Oh6Q7l2zkaNfXkfR7mFSJvGvlD/Ciq/+SQ39d5gynHJZ/0fjt83ec3WL7+u3ssqIijQtBISsw==",
+ "dependencies": {
+ "@types/d3-dsv": "*"
+ }
+ },
+ "node_modules/@types/d3-force": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.3.tgz",
+ "integrity": "sha512-z8GteGVfkWJMKsx6hwC3SiTSLspL98VNpmvLpEFJQpZPq6xpA1I8HNBDNSpukfK0Vb0l64zGFhzunLgEAcBWSA=="
+ },
+ "node_modules/@types/d3-format": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.1.tgz",
+ "integrity": "sha512-5KY70ifCCzorkLuIkDe0Z9YTf9RR2CjBX1iaJG+rgM/cPP+sO+q9YdQ9WdhQcgPj1EQiJ2/0+yUkkziTG6Lubg=="
+ },
+ "node_modules/@types/d3-geo": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.0.2.tgz",
+ "integrity": "sha512-DbqK7MLYA8LpyHQfv6Klz0426bQEf7bRTvhMy44sNGVyZoWn//B0c+Qbeg8Osi2Obdc9BLLXYAKpyWege2/7LQ==",
+ "dependencies": {
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-hierarchy": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.0.tgz",
+ "integrity": "sha512-g+sey7qrCa3UbsQlMZZBOHROkFqx7KZKvUpRzI/tAp/8erZWpYq7FgNKvYwebi2LaEiVs1klhUfd3WCThxmmWQ=="
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-jx5leotSeac3jr0RePOH1KdR9rISG91QIE4Q2PYTu4OymLTZfA3SrnURSLzKH48HmXVUru50b8nje4E79oQSQw==",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.0.0.tgz",
+ "integrity": "sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg=="
+ },
+ "node_modules/@types/d3-polygon": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.0.tgz",
+ "integrity": "sha512-D49z4DyzTKXM0sGKVqiTDTYr+DHg/uxsiWDAkNrwXYuiZVd9o9wXZIo+YsHkifOiyBkmSWlEngHCQme54/hnHw=="
+ },
+ "node_modules/@types/d3-quadtree": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.2.tgz",
+ "integrity": "sha512-QNcK8Jguvc8lU+4OfeNx+qnVy7c0VrDJ+CCVFS9srBo2GL9Y18CnIxBdTF3v38flrGy5s1YggcoAiu6s4fLQIw=="
+ },
+ "node_modules/@types/d3-random": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.1.tgz",
+ "integrity": "sha512-IIE6YTekGczpLYo/HehAy3JGF1ty7+usI97LqraNa8IiDur+L44d0VOjAvFQWJVdZOJHukUJw+ZdZBlgeUsHOQ=="
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-Yk4htunhPAwN0XGlIwArRomOjdoBFXC3+kCxK2Ubg7I9shQlVSJy/pG/Ht5ASN+gdMIalpk8TJ5xV74jFsetLA==",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-scale-chromatic": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz",
+ "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw=="
+ },
+ "node_modules/@types/d3-selection": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.3.tgz",
+ "integrity": "sha512-Mw5cf6nlW1MlefpD9zrshZ+DAWL4IQ5LnWfRheW6xwsdaWOb6IRRu2H7XPAQcyXEx1D7XQWgdoKR83ui1/HlEA=="
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.0.tgz",
+ "integrity": "sha512-jYIYxFFA9vrJ8Hd4Se83YI6XF+gzDL1aC5DCsldai4XYYiVNdhtpGbA/GM6iyQ8ayhSp3a148LY34hy7A4TxZA==",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz",
+ "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg=="
+ },
+ "node_modules/@types/d3-time-format": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.0.tgz",
+ "integrity": "sha512-yjfBUe6DJBsDin2BMIulhSHmr5qNR5Pxs17+oW4DoVPyVIXZ+m6bs7j1UVKP08Emv6jRmYrYqxYzO63mQxy1rw=="
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.0.tgz",
+ "integrity": "sha512-HNB/9GHqu7Fo8AQiugyJbv6ZxYz58wef0esl4Mv828w1ZKpAshw/uFWVDUcIB9KKFeFKoxS3cHY07FFgtTRZ1g=="
+ },
+ "node_modules/@types/d3-transition": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.2.tgz",
+ "integrity": "sha512-jo5o/Rf+/u6uerJ/963Dc39NI16FQzqwOc54bwvksGAdVfvDrqDpVeq95bEvPtBwLCVZutAEyAtmSyEMxN7vxQ==",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-zoom": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.1.tgz",
+ "integrity": "sha512-7s5L9TjfqIYQmQQEUcpMAcBOahem7TRoSO/+Gkz02GbMVuULiZzjF2BOdw291dbO2aNon4m2OdFsRGaCq2caLQ==",
+ "dependencies": {
+ "@types/d3-interpolate": "*",
+ "@types/d3-selection": "*"
+ }
+ },
"node_modules/@types/eslint": {
"version": "7.29.0",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-7.29.0.tgz",
@@ -4270,6 +4493,11 @@
"@types/range-parser": "*"
}
},
+ "node_modules/@types/geojson": {
+ "version": "7946.0.10",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.10.tgz",
+ "integrity": "sha512-Nmh0K3iWQJzniTuPRcJn5hxXkfB1T1pgB89SBig5PlJQU5yocazeu4jATJlaA0GYFKWMqDdvYemoSnF2pXgLVA=="
+ },
"node_modules/@types/graceful-fs": {
"version": "4.1.5",
"resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.5.tgz",
@@ -4470,6 +4698,11 @@
"@types/react": "*"
}
},
+ "node_modules/@types/resize-observer-browser": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.7.tgz",
+ "integrity": "sha512-G9eN0Sn0ii9PWQ3Vl72jDPgeJwRWhv2Qk/nQkJuWmRmOB4HX3/BhD5SE1dZs/hzPZL/WKnvF0RHdTSG54QJFyg=="
+ },
"node_modules/@types/resolve": {
"version": "1.17.1",
"resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.17.1.tgz",
@@ -6339,6 +6572,11 @@
"resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz",
"integrity": "sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA=="
},
+ "node_modules/classcat": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz",
+ "integrity": "sha512-sbpkOw6z413p+HDGcBENe498WM9woqWHiJxCq7nvmxe9WmrUmqfAcxpIwAiMtM5Q3AhYkzXcNQHqsWq0mND51g=="
+ },
"node_modules/classnames": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.1.tgz",
@@ -7780,6 +8018,102 @@
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.5.tgz",
"integrity": "sha512-uVDi8LpBUKQj6sdxNaTetL6FpeCqTjOvAQuQUa/qAqq8oOd4ivkbhgnqayl0dnPal8Tb/yB1tF+gOvCBiicaiQ=="
},
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "d3-selection": "2 - 3"
+ }
+ },
+ "node_modules/d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
"node_modules/damerau-levenshtein": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz",
@@ -17083,6 +17417,28 @@
"resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz",
"integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg=="
},
+ "node_modules/react-flow-renderer": {
+ "version": "10.3.17",
+ "resolved": "https://registry.npmjs.org/react-flow-renderer/-/react-flow-renderer-10.3.17.tgz",
+ "integrity": "sha512-bywiqVErlh5kCDqw3x0an5Ur3mT9j9CwJsDwmhmz4i1IgYM1a0SPqqEhClvjX+s5pU4nHjmVaGXWK96pwsiGcQ==",
+ "dependencies": {
+ "@babel/runtime": "^7.18.9",
+ "@types/d3": "^7.4.0",
+ "@types/resize-observer-browser": "^0.1.7",
+ "classcat": "^5.0.3",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^3.7.2"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "peerDependencies": {
+ "react": "16 || 17 || 18",
+ "react-dom": "16 || 17 || 18"
+ }
+ },
"node_modules/react-is": {
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
@@ -20939,6 +21295,22 @@
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
+ },
+ "node_modules/zustand": {
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-3.7.2.tgz",
+ "integrity": "sha512-PIJDIZKtokhof+9+60cpockVOq05sJzHCriyvaLBmEJixseQ1a5Kdov6fWZfWOu5SK9c+FhH1jU0tntLxRJYMA==",
+ "engines": {
+ "node": ">=12.7.0"
+ },
+ "peerDependencies": {
+ "react": ">=16.8"
+ },
+ "peerDependenciesMeta": {
+ "react": {
+ "optional": true
+ }
+ }
}
},
"dependencies": {
@@ -22112,9 +22484,9 @@
}
},
"@babel/runtime": {
- "version": "7.16.5",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.16.5.tgz",
- "integrity": "sha512-TXWihFIS3Pyv5hzR7j6ihmeLkZfrXGxAr5UfSl8CHf+6q/wpiYDkUau0czckpYG8QmnCIuPpdLtuA9VmuGGyMA==",
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.19.0.tgz",
+ "integrity": "sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==",
"requires": {
"regenerator-runtime": "^0.13.4"
}
@@ -23917,6 +24289,228 @@
"integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==",
"dev": true
},
+ "@types/d3": {
+ "version": "7.4.0",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.0.tgz",
+ "integrity": "sha512-jIfNVK0ZlxcuRDKtRS/SypEyOQ6UHaFQBKv032X45VvxSJ6Yi5G9behy9h6tNTHTDGh5Vq+KbmBjUWLgY4meCA==",
+ "requires": {
+ "@types/d3-array": "*",
+ "@types/d3-axis": "*",
+ "@types/d3-brush": "*",
+ "@types/d3-chord": "*",
+ "@types/d3-color": "*",
+ "@types/d3-contour": "*",
+ "@types/d3-delaunay": "*",
+ "@types/d3-dispatch": "*",
+ "@types/d3-drag": "*",
+ "@types/d3-dsv": "*",
+ "@types/d3-ease": "*",
+ "@types/d3-fetch": "*",
+ "@types/d3-force": "*",
+ "@types/d3-format": "*",
+ "@types/d3-geo": "*",
+ "@types/d3-hierarchy": "*",
+ "@types/d3-interpolate": "*",
+ "@types/d3-path": "*",
+ "@types/d3-polygon": "*",
+ "@types/d3-quadtree": "*",
+ "@types/d3-random": "*",
+ "@types/d3-scale": "*",
+ "@types/d3-scale-chromatic": "*",
+ "@types/d3-selection": "*",
+ "@types/d3-shape": "*",
+ "@types/d3-time": "*",
+ "@types/d3-time-format": "*",
+ "@types/d3-timer": "*",
+ "@types/d3-transition": "*",
+ "@types/d3-zoom": "*"
+ }
+ },
+ "@types/d3-array": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.0.3.tgz",
+ "integrity": "sha512-Reoy+pKnvsksN0lQUlcH6dOGjRZ/3WRwXR//m+/8lt1BXeI4xyaUZoqULNjyXXRuh0Mj4LNpkCvhUpQlY3X5xQ=="
+ },
+ "@types/d3-axis": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.1.tgz",
+ "integrity": "sha512-zji/iIbdd49g9WN0aIsGcwcTBUkgLsCSwB+uH+LPVDAiKWENMtI3cJEWt+7/YYwelMoZmbBfzA3qCdrZ2XFNnw==",
+ "requires": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "@types/d3-brush": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.1.tgz",
+ "integrity": "sha512-B532DozsiTuQMHu2YChdZU0qsFJSio3Q6jmBYGYNp3gMDzBmuFFgPt9qKA4VYuLZMp4qc6eX7IUFUEsvHiXZAw==",
+ "requires": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "@types/d3-chord": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.1.tgz",
+ "integrity": "sha512-eQfcxIHrg7V++W8Qxn6QkqBNBokyhdWSAS73AbkbMzvLQmVVBviknoz2SRS/ZJdIOmhcmmdCRE/NFOm28Z1AMw=="
+ },
+ "@types/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-HKuicPHJuvPgCD+np6Se9MQvS6OCbJmOjGvylzMJRlDwUXjKTTXs6Pwgk79O09Vj/ho3u1ofXnhFOaEWWPrlwA=="
+ },
+ "@types/d3-contour": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.1.tgz",
+ "integrity": "sha512-C3zfBrhHZvrpAAK3YXqLWVAGo87A4SvJ83Q/zVJ8rFWJdKejUnDYaWZPkA8K84kb2vDA/g90LTQAz7etXcgoQQ==",
+ "requires": {
+ "@types/d3-array": "*",
+ "@types/geojson": "*"
+ }
+ },
+ "@types/d3-delaunay": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.1.tgz",
+ "integrity": "sha512-tLxQ2sfT0p6sxdG75c6f/ekqxjyYR0+LwPrsO1mbC9YDBzPJhs2HbJJRrn8Ez1DBoHRo2yx7YEATI+8V1nGMnQ=="
+ },
+ "@types/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-NhxMn3bAkqhjoxabVJWKryhnZXXYYVQxaBnbANu0O94+O/nX9qSjrA1P1jbAQJxJf+VC72TxDX/YJcKue5bRqw=="
+ },
+ "@types/d3-drag": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.1.tgz",
+ "integrity": "sha512-o1Va7bLwwk6h03+nSM8dpaGEYnoIG19P0lKqlic8Un36ymh9NSkNFX1yiXMKNMx8rJ0Kfnn2eovuFaL6Jvj0zA==",
+ "requires": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "@types/d3-dsv": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.0.tgz",
+ "integrity": "sha512-o0/7RlMl9p5n6FQDptuJVMxDf/7EDEv2SYEO/CwdG2tr1hTfUVi0Iavkk2ax+VpaQ/1jVhpnj5rq1nj8vwhn2A=="
+ },
+ "@types/d3-ease": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.0.tgz",
+ "integrity": "sha512-aMo4eaAOijJjA6uU+GIeW018dvy9+oH5Y2VPPzjjfxevvGQ/oRDs+tfYC9b50Q4BygRR8yE2QCLsrT0WtAVseA=="
+ },
+ "@types/d3-fetch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.1.tgz",
+ "integrity": "sha512-toZJNOwrOIqz7Oh6Q7l2zkaNfXkfR7mFSJvGvlD/Ciq/+SQ39d5gynHJZ/0fjt83ec3WL7+u3ssqIijQtBISsw==",
+ "requires": {
+ "@types/d3-dsv": "*"
+ }
+ },
+ "@types/d3-force": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.3.tgz",
+ "integrity": "sha512-z8GteGVfkWJMKsx6hwC3SiTSLspL98VNpmvLpEFJQpZPq6xpA1I8HNBDNSpukfK0Vb0l64zGFhzunLgEAcBWSA=="
+ },
+ "@types/d3-format": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.1.tgz",
+ "integrity": "sha512-5KY70ifCCzorkLuIkDe0Z9YTf9RR2CjBX1iaJG+rgM/cPP+sO+q9YdQ9WdhQcgPj1EQiJ2/0+yUkkziTG6Lubg=="
+ },
+ "@types/d3-geo": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.0.2.tgz",
+ "integrity": "sha512-DbqK7MLYA8LpyHQfv6Klz0426bQEf7bRTvhMy44sNGVyZoWn//B0c+Qbeg8Osi2Obdc9BLLXYAKpyWege2/7LQ==",
+ "requires": {
+ "@types/geojson": "*"
+ }
+ },
+ "@types/d3-hierarchy": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.0.tgz",
+ "integrity": "sha512-g+sey7qrCa3UbsQlMZZBOHROkFqx7KZKvUpRzI/tAp/8erZWpYq7FgNKvYwebi2LaEiVs1klhUfd3WCThxmmWQ=="
+ },
+ "@types/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-jx5leotSeac3jr0RePOH1KdR9rISG91QIE4Q2PYTu4OymLTZfA3SrnURSLzKH48HmXVUru50b8nje4E79oQSQw==",
+ "requires": {
+ "@types/d3-color": "*"
+ }
+ },
+ "@types/d3-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.0.0.tgz",
+ "integrity": "sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg=="
+ },
+ "@types/d3-polygon": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.0.tgz",
+ "integrity": "sha512-D49z4DyzTKXM0sGKVqiTDTYr+DHg/uxsiWDAkNrwXYuiZVd9o9wXZIo+YsHkifOiyBkmSWlEngHCQme54/hnHw=="
+ },
+ "@types/d3-quadtree": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.2.tgz",
+ "integrity": "sha512-QNcK8Jguvc8lU+4OfeNx+qnVy7c0VrDJ+CCVFS9srBo2GL9Y18CnIxBdTF3v38flrGy5s1YggcoAiu6s4fLQIw=="
+ },
+ "@types/d3-random": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.1.tgz",
+ "integrity": "sha512-IIE6YTekGczpLYo/HehAy3JGF1ty7+usI97LqraNa8IiDur+L44d0VOjAvFQWJVdZOJHukUJw+ZdZBlgeUsHOQ=="
+ },
+ "@types/d3-scale": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.2.tgz",
+ "integrity": "sha512-Yk4htunhPAwN0XGlIwArRomOjdoBFXC3+kCxK2Ubg7I9shQlVSJy/pG/Ht5ASN+gdMIalpk8TJ5xV74jFsetLA==",
+ "requires": {
+ "@types/d3-time": "*"
+ }
+ },
+ "@types/d3-scale-chromatic": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz",
+ "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw=="
+ },
+ "@types/d3-selection": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.3.tgz",
+ "integrity": "sha512-Mw5cf6nlW1MlefpD9zrshZ+DAWL4IQ5LnWfRheW6xwsdaWOb6IRRu2H7XPAQcyXEx1D7XQWgdoKR83ui1/HlEA=="
+ },
+ "@types/d3-shape": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.0.tgz",
+ "integrity": "sha512-jYIYxFFA9vrJ8Hd4Se83YI6XF+gzDL1aC5DCsldai4XYYiVNdhtpGbA/GM6iyQ8ayhSp3a148LY34hy7A4TxZA==",
+ "requires": {
+ "@types/d3-path": "*"
+ }
+ },
+ "@types/d3-time": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz",
+ "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg=="
+ },
+ "@types/d3-time-format": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.0.tgz",
+ "integrity": "sha512-yjfBUe6DJBsDin2BMIulhSHmr5qNR5Pxs17+oW4DoVPyVIXZ+m6bs7j1UVKP08Emv6jRmYrYqxYzO63mQxy1rw=="
+ },
+ "@types/d3-timer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.0.tgz",
+ "integrity": "sha512-HNB/9GHqu7Fo8AQiugyJbv6ZxYz58wef0esl4Mv828w1ZKpAshw/uFWVDUcIB9KKFeFKoxS3cHY07FFgtTRZ1g=="
+ },
+ "@types/d3-transition": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.2.tgz",
+ "integrity": "sha512-jo5o/Rf+/u6uerJ/963Dc39NI16FQzqwOc54bwvksGAdVfvDrqDpVeq95bEvPtBwLCVZutAEyAtmSyEMxN7vxQ==",
+ "requires": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "@types/d3-zoom": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.1.tgz",
+ "integrity": "sha512-7s5L9TjfqIYQmQQEUcpMAcBOahem7TRoSO/+Gkz02GbMVuULiZzjF2BOdw291dbO2aNon4m2OdFsRGaCq2caLQ==",
+ "requires": {
+ "@types/d3-interpolate": "*",
+ "@types/d3-selection": "*"
+ }
+ },
"@types/eslint": {
"version": "7.29.0",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-7.29.0.tgz",
@@ -23961,6 +24555,11 @@
"@types/range-parser": "*"
}
},
+ "@types/geojson": {
+ "version": "7946.0.10",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.10.tgz",
+ "integrity": "sha512-Nmh0K3iWQJzniTuPRcJn5hxXkfB1T1pgB89SBig5PlJQU5yocazeu4jATJlaA0GYFKWMqDdvYemoSnF2pXgLVA=="
+ },
"@types/graceful-fs": {
"version": "4.1.5",
"resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.5.tgz",
@@ -24161,6 +24760,11 @@
"@types/react": "*"
}
},
+ "@types/resize-observer-browser": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.7.tgz",
+ "integrity": "sha512-G9eN0Sn0ii9PWQ3Vl72jDPgeJwRWhv2Qk/nQkJuWmRmOB4HX3/BhD5SE1dZs/hzPZL/WKnvF0RHdTSG54QJFyg=="
+ },
"@types/resolve": {
"version": "1.17.1",
"resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.17.1.tgz",
@@ -25566,6 +26170,11 @@
"resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz",
"integrity": "sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA=="
},
+ "classcat": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz",
+ "integrity": "sha512-sbpkOw6z413p+HDGcBENe498WM9woqWHiJxCq7nvmxe9WmrUmqfAcxpIwAiMtM5Q3AhYkzXcNQHqsWq0mND51g=="
+ },
"classnames": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.1.tgz",
@@ -26545,6 +27154,72 @@
"resolved": "https://registry.npmjs.org/csstype/-/csstype-3.0.5.tgz",
"integrity": "sha512-uVDi8LpBUKQj6sdxNaTetL6FpeCqTjOvAQuQUa/qAqq8oOd4ivkbhgnqayl0dnPal8Tb/yB1tF+gOvCBiicaiQ=="
},
+ "d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA=="
+ },
+ "d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg=="
+ },
+ "d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "requires": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ }
+ },
+ "d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w=="
+ },
+ "d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "requires": {
+ "d3-color": "1 - 3"
+ }
+ },
+ "d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ=="
+ },
+ "d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA=="
+ },
+ "d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "requires": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ }
+ },
+ "d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "requires": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ }
+ },
"damerau-levenshtein": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz",
@@ -33347,6 +34022,21 @@
"resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz",
"integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg=="
},
+ "react-flow-renderer": {
+ "version": "10.3.17",
+ "resolved": "https://registry.npmjs.org/react-flow-renderer/-/react-flow-renderer-10.3.17.tgz",
+ "integrity": "sha512-bywiqVErlh5kCDqw3x0an5Ur3mT9j9CwJsDwmhmz4i1IgYM1a0SPqqEhClvjX+s5pU4nHjmVaGXWK96pwsiGcQ==",
+ "requires": {
+ "@babel/runtime": "^7.18.9",
+ "@types/d3": "^7.4.0",
+ "@types/resize-observer-browser": "^0.1.7",
+ "classcat": "^5.0.3",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^3.7.2"
+ }
+ },
"react-is": {
"version": "16.13.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
@@ -36262,6 +36952,12 @@
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="
+ },
+ "zustand": {
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-3.7.2.tgz",
+ "integrity": "sha512-PIJDIZKtokhof+9+60cpockVOq05sJzHCriyvaLBmEJixseQ1a5Kdov6fWZfWOu5SK9c+FhH1jU0tntLxRJYMA==",
+ "requires": {}
}
}
}
diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json
index 112e7802ac7..462b249b447 100644
--- a/web/vtadmin/package.json
+++ b/web/vtadmin/package.json
@@ -27,6 +27,7 @@
"query-string": "^6.14.0",
"react": "^17.0.1",
"react-dom": "^17.0.1",
+ "react-flow-renderer": "^10.3.17",
"react-query": "^3.5.9",
"react-router-dom": "^5.2.0",
"react-scripts": "5.0.1",
diff --git a/web/vtadmin/src/api/http.ts b/web/vtadmin/src/api/http.ts
index 4b5e0209a1f..22fe0d9bdd0 100644
--- a/web/vtadmin/src/api/http.ts
+++ b/web/vtadmin/src/api/http.ts
@@ -749,8 +749,95 @@ export const createShard = async (params: CreateShardParams) => {
method: 'post',
body: JSON.stringify(params),
});
- const err = pb.CreateShardRequest.verify(result);
+ const err = vtctldata.CreateShardResponse.verify(result);
if (err) throw Error(err);
return vtctldata.CreateShardResponse.create(result);
};
+export interface ValidateParams {
+ clusterID: string;
+ pingTablets: boolean;
+}
+
+export const validate = async (params: ValidateParams) => {
+ const { result } = await vtfetch(`/api/cluster/${params.clusterID}/validate`, {
+ method: 'put',
+ body: JSON.stringify({ ping_tablets: params.pingTablets }),
+ });
+ const err = pb.ValidateRequest.verify(result);
+ if (err) throw Error(err);
+
+ return vtctldata.ValidateResponse.create(result);
+};
+
+export interface ValidateShardParams {
+ clusterID: string;
+ keyspace: string;
+ shard: string;
+ pingTablets: boolean;
+}
+
+export const validateShard = async (params: ValidateShardParams) => {
+ const { result } = await vtfetch(`/api/shard/${params.clusterID}/${params.keyspace}/${params.shard}/validate`, {
+ method: 'put',
+ body: JSON.stringify({ ping_tablets: params.pingTablets }),
+ });
+
+ const err = vtctldata.ValidateShardResponse.verify(result);
+ if (err) throw Error(err);
+
+ return vtctldata.ValidateShardResponse.create(result);
+};
+
+export interface GetFullStatusParams {
+ clusterID: string;
+ alias: string;
+}
+
+export const getFullStatus = async (params: GetFullStatusParams) => {
+ const req = new URLSearchParams();
+ req.append('cluster', params.clusterID);
+
+ const { result } = await vtfetch(`/api/tablet/${params.alias}/full_status?${req.toString()}`);
+
+ const err = vtctldata.GetFullStatusResponse.verify(result);
+ if (err) throw Error(err);
+
+ return vtctldata.GetFullStatusResponse.create(result);
+};
+
+export interface ValidateVersionShardParams {
+ clusterID: string;
+ keyspace: string;
+ shard: string;
+}
+
+export const validateVersionShard = async (params: ValidateVersionShardParams) => {
+ const { result } = await vtfetch(
+ `/api/shard/${params.clusterID}/${params.keyspace}/${params.shard}/validate_version`,
+ {
+ method: 'put',
+ }
+ );
+
+ const err = vtctldata.ValidateVersionShardResponse.verify(result);
+ if (err) throw Error(err);
+
+ return vtctldata.ValidateVersionShardResponse.create(result);
+};
+export interface GetTopologyPathParams {
+ clusterID: string;
+ path: string;
+}
+
+export const getTopologyPath = async (params: GetTopologyPathParams) => {
+ const req = new URLSearchParams();
+ req.append('path', params.path);
+
+ const { result } = await vtfetch(`/api/cluster/${params.clusterID}/topology?${req}`);
+
+ const err = vtctldata.GetTopologyPathResponse.verify(result);
+ if (err) throw Error(err);
+
+ return vtctldata.GetTopologyPathResponse.create(result);
+};
diff --git a/web/vtadmin/src/components/ActionPanel.tsx b/web/vtadmin/src/components/ActionPanel.tsx
index 3e6fa62113b..45c68491ffc 100644
--- a/web/vtadmin/src/components/ActionPanel.tsx
+++ b/web/vtadmin/src/components/ActionPanel.tsx
@@ -93,16 +93,6 @@ const ActionPanel: React.FC = ({
)
)}
- {warnings.map(
- (warning, i) =>
- warning && (
-
-
- {warning}
-
- )
- )}
-
{/* Don't render the confirmation input if "disabled" prop is set */}
{requiresConfirmation && !disabled && (
<>
diff --git a/web/vtadmin/src/components/App.tsx b/web/vtadmin/src/components/App.tsx
index fb656060431..f7821f3a4ad 100644
--- a/web/vtadmin/src/components/App.tsx
+++ b/web/vtadmin/src/components/App.tsx
@@ -38,6 +38,8 @@ import { Vtctlds } from './routes/Vtctlds';
import { SnackbarContainer } from './Snackbar';
import { isReadOnlyMode } from '../util/env';
import { CreateKeyspace } from './routes/createKeyspace/CreateKeyspace';
+import { Topology } from './routes/topology/Topology';
+import { ClusterTopology } from './routes/topology/ClusterTopology';
export const App = () => {
return (
@@ -115,6 +117,14 @@ export const App = () => {
+
+
+
+
+
+
+
+
diff --git a/web/vtadmin/src/components/Icon.tsx b/web/vtadmin/src/components/Icon.tsx
index a26940fea68..ddf0ca65c18 100644
--- a/web/vtadmin/src/components/Icon.tsx
+++ b/web/vtadmin/src/components/Icon.tsx
@@ -50,4 +50,5 @@ export enum Icons {
question = 'question',
runQuery = 'runQuery',
search = 'search',
+ topology = 'topology',
}
diff --git a/web/vtadmin/src/components/NavRail.tsx b/web/vtadmin/src/components/NavRail.tsx
index a8a9594377f..e2897395760 100644
--- a/web/vtadmin/src/components/NavRail.tsx
+++ b/web/vtadmin/src/components/NavRail.tsx
@@ -71,6 +71,9 @@ export const NavRail = () => {
+
+
+
diff --git a/web/vtadmin/src/components/ValidationResults.tsx b/web/vtadmin/src/components/ValidationResults.tsx
new file mode 100644
index 00000000000..193b5295b1c
--- /dev/null
+++ b/web/vtadmin/src/components/ValidationResults.tsx
@@ -0,0 +1,60 @@
+import React from 'react';
+import { vtctldata } from '../proto/vtadmin';
+
+interface Props {
+ resultsByKeyspace?: {
+ [k: string]: vtctldata.IValidateKeyspaceResponse;
+ };
+ resultsByShard?: vtctldata.ValidateShardResponse | vtctldata.ValidateVersionShardResponse;
+ shard?: string;
+}
+
+const ValidationResults: React.FC = ({ resultsByKeyspace, resultsByShard, shard }) => {
+ const hasShardResults = resultsByShard && resultsByShard.results.length > 0;
+ return (
+
+
+
+
+ {resultsByKeyspace && Keyspace }
+ {resultsByKeyspace && Shard }
+ Result
+
+
+
+ {resultsByKeyspace &&
+ Object.entries(resultsByKeyspace).map(([keyspace, results]) => {
+ return (
+ results.results_by_shard &&
+ Object.entries(results.results_by_shard).map(([shard, results]) => (
+
+ {keyspace}
+ {shard}
+ {results.results}
+
+ ))
+ );
+ })}
+ {hasShardResults && (
+ <>
+ {resultsByShard.results.map((r, i) => (
+
+ {r}
+
+ ))}
+ >
+ )}
+ {!hasShardResults && (
+ <>
+
+ No results
+
+ >
+ )}
+
+
+
+ );
+};
+
+export default ValidationResults;
diff --git a/web/vtadmin/src/components/dialog/Dialog.tsx b/web/vtadmin/src/components/dialog/Dialog.tsx
index 9740f7a7a1d..ab1dcf44ef1 100644
--- a/web/vtadmin/src/components/dialog/Dialog.tsx
+++ b/web/vtadmin/src/components/dialog/Dialog.tsx
@@ -55,7 +55,7 @@ const Dialog: React.FC = ({
className="fixed z-10 inset-0 overflow-y-auto"
initialFocus={cancelButtonRef}
onClose={(_) => {
- onClose && onClose();
+ onClose?.();
}}
>
@@ -120,8 +120,7 @@ const Dialog: React.FC
= ({
type="button"
className="btn"
onClick={() => {
- onConfirm && onConfirm();
- onClose && onClose();
+ onConfirm?.();
}}
>
{loading ? loadingText : confirmText || 'Confirm'}
@@ -132,8 +131,8 @@ const Dialog: React.FC = ({
type="button"
className="btn btn-secondary"
onClick={() => {
- onCancel && onCancel();
- onClose && onClose();
+ onCancel?.();
+ onClose?.();
}}
ref={cancelButtonRef}
>
diff --git a/web/vtadmin/src/components/inputs/Select.tsx b/web/vtadmin/src/components/inputs/Select.tsx
index 6200d113fbf..c9292b56d76 100644
--- a/web/vtadmin/src/components/inputs/Select.tsx
+++ b/web/vtadmin/src/components/inputs/Select.tsx
@@ -35,6 +35,7 @@ interface Props {
selectedItem: T | null;
size?: 'large';
description?: string;
+ required?: boolean;
}
/**
@@ -56,6 +57,7 @@ export const Select = ({
selectedItem,
size,
description,
+ required,
}: Props) => {
const _itemToString = React.useCallback(
(item: T | null): string => {
@@ -136,7 +138,7 @@ export const Select = ({
return (
-
+
{description &&
{description}
}
{
useDocumentTitle('Clusters');
const clustersQuery = useClusters();
@@ -35,12 +34,7 @@ export const Clusters = () => {
}, [clustersQuery.data]);
const renderRows = (rows: pb.Cluster[]) =>
- rows.map((cluster, idx) => (
-
- {cluster.name}
- {cluster.id}
-
- ));
+ rows.map((cluster, idx) => );
return (
@@ -50,7 +44,7 @@ export const Clusters = () => {
-
+
diff --git a/web/vtadmin/src/components/routes/Gates.tsx b/web/vtadmin/src/components/routes/Gates.tsx
index 354d4507a3e..5e0f0fefd24 100644
--- a/web/vtadmin/src/components/routes/Gates.tsx
+++ b/web/vtadmin/src/components/routes/Gates.tsx
@@ -41,6 +41,7 @@ export const Gates = () => {
hostname: g.hostname,
keyspaces: g.keyspaces,
pool: g.pool,
+ fqdn: g.FQDN,
}));
const filtered = filterNouns(filter, mapped);
return orderBy(filtered, ['cluster', 'pool', 'hostname', 'cell']);
@@ -53,7 +54,17 @@ export const Gates = () => {
{gate.pool}
{gate.cluster}
-
{gate.hostname}
+
+ {gate.fqdn ? (
+
+ ) : (
+ gate.hostname
+ )}
+
{gate.cell}
{(gate.keyspaces || []).join(', ')}
diff --git a/web/vtadmin/src/components/routes/clusters/ClusterRow.tsx b/web/vtadmin/src/components/routes/clusters/ClusterRow.tsx
new file mode 100644
index 00000000000..fba35d5257f
--- /dev/null
+++ b/web/vtadmin/src/components/routes/clusters/ClusterRow.tsx
@@ -0,0 +1,93 @@
+import React, { useState } from 'react';
+import { DataCell } from '../../dataTable/DataCell';
+import { vtadmin as pb } from '../../../proto/vtadmin';
+import Dialog from '../../dialog/Dialog';
+import { Icon, Icons } from '../../Icon';
+import { useValidate } from '../../../hooks/api';
+import { Label } from '../../inputs/Label';
+import Toggle from '../../toggle/Toggle';
+import ValidationResults from '../../ValidationResults';
+
+interface Props {
+ cluster: pb.Cluster;
+}
+
+const ClusterRow: React.FC
= ({ cluster }) => {
+ const [isOpen, setIsOpen] = useState(false);
+ const [pingTablets, setPingTablets] = useState(false);
+
+ const { mutate, error, data, isIdle, reset } = useValidate({ clusterID: cluster.id, pingTablets });
+ const closeDialog = () => {
+ setIsOpen(false);
+ reset();
+ };
+ return (
+
+ mutate({ pingTablets, clusterID: cluster.id })}
+ loadingText="Validating"
+ onCancel={closeDialog}
+ onClose={closeDialog}
+ hideCancel={!isIdle}
+ title={!isIdle ? undefined : 'Validate'}
+ className="min-w-[400px]"
+ >
+
+ {isIdle && (
+
+
+ Validate that all nodes in the cluster are reachable from the global replication graph,
+ as well as all tablets in discoverable cells, are consistent.
+
+
+ setPingTablets(!pingTablets)} />
+
+
+ When set, all tablets will be pinged during the validation process.
+
+ )}
+ {!isIdle && !error && (
+
+
+
+
+ Successfully validated cluster {cluster.name}
+
+
+ {data?.results_by_keyspace && (
+
+ )}
+
+ )}
+ {!isIdle && error && (
+
+
+
+
+
+ There was an issue validating nodes in cluster {cluster.name}
+
+
+ )}
+
+
+ {cluster.name}
+ {cluster.id}
+
+ {
+ setIsOpen(true);
+ }}
+ >
+ Validate
+
+
+
+ );
+};
+
+export default ClusterRow;
diff --git a/web/vtadmin/src/components/routes/shard/Advanced.tsx b/web/vtadmin/src/components/routes/shard/Advanced.tsx
index 3bf8b0ade6b..fc765e848a1 100644
--- a/web/vtadmin/src/components/routes/shard/Advanced.tsx
+++ b/web/vtadmin/src/components/routes/shard/Advanced.tsx
@@ -8,6 +8,8 @@ import {
useReloadSchemaShard,
useTabletExternallyPromoted,
useTablets,
+ useValidateShard,
+ useValidateVersionShard,
} from '../../../hooks/api';
import { useDocumentTitle } from '../../../hooks/useDocumentTitle';
import ActionPanel from '../../ActionPanel';
@@ -19,9 +21,10 @@ import { TextInput } from '../../TextInput';
import { NumberInput } from '../../NumberInput';
import { Select } from '../../inputs/Select';
import { formatAlias, formatDisplayType } from '../../../util/tablets';
-import { logutil, vtadmin } from '../../../proto/vtadmin';
+import { logutil, vtadmin, vtctldata } from '../../../proto/vtadmin';
import Dialog from '../../dialog/Dialog';
import EventLogEntry from './EventLogEntry';
+import ValidationResults from '../../ValidationResults';
interface RouteParams {
clusterID: string;
keyspace: string;
@@ -40,7 +43,7 @@ const Advanced: React.FC = () => {
const { data: tablets = [] } = useTablets();
// dialog parameters
- const [isOpen, setIsOpen] = useState(false);
+ const [failoverDialogIsOpen, setFailoverDialogIsOpen] = useState(false);
const [dialogTitle, setDialogTitle] = useState('');
const [dialogDescription, setDialogDescription] = useState('');
const [events, setEvents] = useState([]);
@@ -117,7 +120,7 @@ const Advanced: React.FC = () => {
plannedReparentTablet?.tablet?.alias
)}.`
);
- setIsOpen(true);
+ setFailoverDialogIsOpen(true);
setEvents(result.events);
},
onError: (error) =>
@@ -142,7 +145,7 @@ const Advanced: React.FC = () => {
emergencyReparentTablet?.tablet?.alias
)}.`
);
- setIsOpen(true);
+ setFailoverDialogIsOpen(true);
setEvents(result.events);
},
onError: (error) =>
@@ -154,6 +157,62 @@ const Advanced: React.FC = () => {
}
);
+ // validation dialog parameters
+ const [validateDialogTitle, setValidateDialogTitle] = useState('');
+ const [validateDialogDescription, setValidateDialogDescription] = useState('');
+ const onCloseValidateDialog = () => {
+ setValidateDialogIsOpen(false);
+ setValidateDialogTitle('');
+ setValidateDialogDescription('');
+ setValidateShardResponse(null);
+ setValidateVersionShardResponse(null);
+ };
+
+ // validateShard parameters
+ const [pingTablets, setPingTablets] = useState(false);
+ const [validateDialogIsOpen, setValidateDialogIsOpen] = useState(false);
+ const [validateShardResponse, setValidateShardResponse] = useState(null);
+
+ const validateShardMutation = useValidateShard(
+ {
+ clusterID: params.clusterID,
+ keyspace: params.keyspace,
+ shard: params.shard,
+ pingTablets,
+ },
+ {
+ onSuccess: (result) => {
+ setValidateDialogTitle('Validate Shard');
+ setValidateDialogDescription(`Successfully validated ${params.shard}.`);
+ setValidateDialogIsOpen(true);
+ setValidateShardResponse(result);
+ },
+ onError: (error) => warn(`There was an error validating shard ${params.shard}: ${error}`),
+ }
+ );
+
+ const [
+ validateVersionShardResponse,
+ setValidateVersionShardResponse,
+ ] = useState(null);
+
+ const validateVersionShardMutation = useValidateVersionShard(
+ {
+ clusterID: params.clusterID,
+ keyspace: params.keyspace,
+ shard: params.shard,
+ },
+ {
+ onSuccess: (result) => {
+ setValidateDialogTitle('Validate Version Shard');
+ setValidateDialogDescription(`Successfully validated versions on ${params.shard}.`);
+ setValidateDialogIsOpen(true);
+ setValidateShardResponse(result);
+ },
+ onError: (error) => warn(`There was an error validating versions on shard ${params.shard}: ${error}`),
+ }
+ );
+
if (kq.error) {
return (
@@ -171,8 +230,9 @@ const Advanced: React.FC = () => {
<>
setIsOpen(false)}
+ isOpen={failoverDialogIsOpen}
+ onClose={() => setFailoverDialogIsOpen(false)}
+ onConfirm={() => setFailoverDialogIsOpen(false)}
title={dialogTitle}
hideCancel
confirmText="Dismiss"
@@ -187,9 +247,71 @@ const Advanced: React.FC = () => {
>
+
+ <>
+ {validateDialogDescription}
+ {validateShardResponse && (
+
+ )}
+ >
+
Status
+
+
+ Validates that all nodes reachable from the specified shard{' '}
+ {shardName} are consistent.
+ >
+ }
+ documentationLink="https://vitess.io/docs/reference/programs/vtctldclient/vtctldclient_validateshard/#vtctldclient-validateshard"
+ loadingText="Validating shard..."
+ loadedText="Validate"
+ mutation={validateShardMutation as UseMutationResult}
+ title="Validate Shard"
+ body={
+ <>
+
+
+ setPingTablets(!pingTablets)}
+ />
+
+
+ When set, all tablets are pinged during the validation process.
+
+ >
+ }
+ />
+
+ Validates that the version on the primary matches all of the replicas on shard{' '}
+ {shardName} .
+ >
+ }
+ documentationLink="https://vitess.io/docs/reference/programs/vtctl/schema-version-permissions/#validateversionshard"
+ loadingText="Validating shard versions..."
+ loadedText="Validate"
+ mutation={validateVersionShardMutation as UseMutationResult}
+ title="Validate Version Shard"
+ />
+
@@ -281,6 +403,7 @@ const Advanced: React.FC = () => {
selectedItem={tablet}
placeholder="Tablet"
description="This chosen tablet will be considered the shard primary (but Vitess won't change the replication setup)."
+ required
/>
@@ -353,6 +476,7 @@ const Advanced: React.FC = () => {
setPlannedReparentTablet(t as vtadmin.Tablet)}
label="Tablet"
items={tabletsInCluster}
@@ -386,6 +510,7 @@ const Advanced: React.FC = () => {
setEmergencyReparentTablet(t as vtadmin.Tablet)}
label="Tablet"
items={tabletsInCluster}
diff --git a/web/vtadmin/src/components/routes/tablet/FullStatus.tsx b/web/vtadmin/src/components/routes/tablet/FullStatus.tsx
new file mode 100644
index 00000000000..8d0d02b2905
--- /dev/null
+++ b/web/vtadmin/src/components/routes/tablet/FullStatus.tsx
@@ -0,0 +1,57 @@
+import React from 'react';
+import { Link } from 'react-router-dom';
+import { useGetFullStatus } from '../../../hooks/api';
+import { vtadmin } from '../../../proto/vtadmin';
+import { formatAlias } from '../../../util/tablets';
+import { Code } from '../../Code';
+import style from './Tablet.module.scss';
+
+interface Props {
+ tablet: vtadmin.Tablet;
+}
+
+function stateReplacer(key: string, val: number) {
+ if (key === 'io_state' || key === 'sql_state') {
+ if (val === 3) {
+ return 'Running';
+ } else if (val === 2) {
+ return 'Connecting';
+ } else if (val === 1) {
+ return 'Stopped';
+ }
+ }
+ return val;
+}
+
+const FullStatus: React.FC = ({ tablet }) => {
+ const { data, error } = useGetFullStatus({
+ // Ok to use ? operator here; if params are null
+ // will fall back to error = true case
+ clusterID: tablet.cluster?.id as string,
+ alias: formatAlias(tablet.tablet?.alias) as string,
+ });
+
+ if (error) {
+ return (
+
+
😰
+
An error occurred
+
{error.message}
+
+ ← All tablets
+
+
+ );
+ }
+
+ if (data && data.status) {
+ data.status.semi_sync_primary_enabled = !!data.status.semi_sync_primary_enabled;
+ data.status.semi_sync_replica_enabled = !!data.status.semi_sync_replica_enabled;
+ data.status.semi_sync_primary_status = !!data.status.semi_sync_primary_status;
+ data.status.semi_sync_replica_status = !!data.status.semi_sync_replica_status;
+ }
+
+ return
;
+};
+
+export default FullStatus;
diff --git a/web/vtadmin/src/components/routes/tablet/Tablet.tsx b/web/vtadmin/src/components/routes/tablet/Tablet.tsx
index 6e8695cc649..8924244be04 100644
--- a/web/vtadmin/src/components/routes/tablet/Tablet.tsx
+++ b/web/vtadmin/src/components/routes/tablet/Tablet.tsx
@@ -32,8 +32,8 @@ import { TabContainer } from '../../tabs/TabContainer';
import Advanced from './Advanced';
import style from './Tablet.module.scss';
import { TabletCharts } from './TabletCharts';
-import { TabletReplication } from './TabletReplication';
import { env } from '../../../util/env';
+import FullStatus from './FullStatus';
interface RouteParams {
alias: string;
@@ -48,6 +48,7 @@ export const Tablet = () => {
const { data: tablet, ...tq } = useTablet({ alias, clusterID });
const { data: debugVars } = useExperimentalTabletDebugVars({ alias, clusterID });
+
if (tq.error) {
return (
@@ -105,9 +106,8 @@ export const Tablet = () => {
-
+
-
@@ -118,10 +118,6 @@ export const Tablet = () => {
-
-
-
-
@@ -132,6 +128,8 @@ export const Tablet = () => {
+ {tablet && }
+
{!isReadOnlyMode() && (
diff --git a/web/vtadmin/src/components/routes/tablet/TabletReplication.tsx b/web/vtadmin/src/components/routes/tablet/TabletReplication.tsx
deleted file mode 100644
index 9519f3cc2a0..00000000000
--- a/web/vtadmin/src/components/routes/tablet/TabletReplication.tsx
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Copyright 2022 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import { isEmpty } from 'lodash-es';
-import { useShardReplicationPositions } from '../../../hooks/api';
-import { vtadmin as pb } from '../../../proto/vtadmin';
-import { formatAlias, formatPaddedAlias } from '../../../util/tablets';
-import { Code } from '../../Code';
-import { QueryErrorPlaceholder } from '../../placeholders/QueryErrorPlaceholder';
-import { QueryLoadingPlaceholder } from '../../placeholders/QueryLoadingPlaceholder';
-
-interface Props {
- tablet?: pb.Tablet;
-}
-
-export const TabletReplication: React.FC = ({ tablet }) => {
- const clusterID = tablet?.cluster?.id;
- const keyspace = tablet?.tablet?.keyspace;
- const shard = tablet?.tablet?.shard;
- const keyspaceShard = keyspace && shard ? `${keyspace}/${shard}` : null;
- const alias = formatAlias(tablet?.tablet?.alias);
- const paddedAlias = tablet ? formatPaddedAlias(tablet.tablet?.alias) : null;
-
- const q = useShardReplicationPositions({
- clusterIDs: [clusterID],
- keyspaces: [keyspace],
- keyspaceShards: [keyspaceShard],
- });
-
- // Loading of the tablet itself is (for now) handled by the parent component,
- // so allow it to handle the loading display, too.
- if (!tablet) {
- return null;
- }
-
- const positionsForShard = q.data?.replication_positions.find(
- (p) => p.cluster?.id === clusterID && p.keyspace === keyspace && p.shard === shard
- );
-
- const replicationStatuses = positionsForShard?.position_info?.replication_statuses;
- const replicationStatus = replicationStatuses && paddedAlias ? replicationStatuses[paddedAlias] : null;
-
- let content = null;
- if (q.isSuccess) {
- content = isEmpty(replicationStatus) ? (
- No replication status for {alias}
- ) : (
-
- );
- }
-
- return (
-
-
-
- {content}
-
- );
-};
diff --git a/web/vtadmin/src/components/routes/topology/ClusterTopology.tsx b/web/vtadmin/src/components/routes/topology/ClusterTopology.tsx
new file mode 100644
index 00000000000..1af63261447
--- /dev/null
+++ b/web/vtadmin/src/components/routes/topology/ClusterTopology.tsx
@@ -0,0 +1,148 @@
+/**
+ * Copyright 2022 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import React, { useEffect, useState } from 'react';
+
+import { useTopologyPath } from '../../../hooks/api';
+import { useDocumentTitle } from '../../../hooks/useDocumentTitle';
+import { ContentContainer } from '../../layout/ContentContainer';
+import { NavCrumbs } from '../../layout/NavCrumbs';
+import { WorkspaceHeader } from '../../layout/WorkspaceHeader';
+import { WorkspaceTitle } from '../../layout/WorkspaceTitle';
+import { Link, useParams } from 'react-router-dom';
+import { generateGraph, TopologyCell, TopologyCellChild } from './Nodes';
+import ReactFlow, {
+ addEdge,
+ MiniMap,
+ Controls,
+ Background,
+ useNodesState,
+ useEdgesState,
+ Connection,
+} from 'react-flow-renderer';
+import { getTopologyPath } from '../../../api/http';
+
+export const ClusterTopology = () => {
+ interface RouteParams {
+ clusterID: string;
+ }
+ useDocumentTitle('Cluster Topolgy');
+ const { clusterID } = useParams();
+ const { data } = useTopologyPath({ clusterID, path: '/' });
+ const [topology, setTopology] = useState<{ cell: TopologyCell }>({ cell: data?.cell as TopologyCell });
+
+ const [nodes, setNodes, onNodesChange] = useNodesState([]);
+ const [edges, setEdges, onEdgesChange] = useEdgesState([]);
+
+ const onConnect = (params: Connection) => setEdges((eds) => addEdge(params, eds));
+ const onExpand = async (path: string) => {
+ const { cell } = await getTopologyPath({ clusterID, path });
+ const newTopo = { ...topology };
+ newTopo.cell.children = placeCell(newTopo.cell, cell as TopologyCell);
+ setTopology(newTopo);
+ };
+
+ const placeCell = (currentCell: TopologyCell, newCell: TopologyCell): TopologyCellChild[] => {
+ const newChildren: TopologyCellChild[] = [];
+ currentCell.children?.forEach((c) => {
+ if (typeof c === 'string' && c === newCell?.name) {
+ newChildren.push(newCell as TopologyCell);
+ }
+ if (typeof c == 'string' && c !== newCell?.name) {
+ newChildren.push(c);
+ }
+ if (typeof c !== 'string') {
+ c.children = placeCell(c, newCell);
+ newChildren.push(c);
+ }
+ });
+ return newChildren;
+ };
+
+ useEffect(() => {
+ const { nodes: initialNodes, edges: initialEdges } = topology
+ ? generateGraph(topology, onExpand)
+ : { nodes: [], edges: [] };
+ setNodes(initialNodes);
+ setEdges(initialEdges);
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [topology]);
+
+ useEffect(() => {
+ if (data?.cell) {
+ setTopology({ cell: data?.cell as TopologyCell });
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [data]);
+
+ if (!data) {
+ return (
+
+
+
+ Topology
+
+
+ {clusterID}
+
+
+ 404
+
+ );
+ }
+
+ return (
+
+
+
+ Topology
+
+
+ {clusterID}
+
+
+
+
+ {
+ if (n.style?.background) return n.style.background as string;
+ if (n.type === 'input') return '#0041d0';
+ if (n.type === 'output') return '#ff0072';
+ if (n.type === 'default') return '#1a192b';
+
+ return '#eee';
+ }}
+ nodeColor={(n) => {
+ if (n.style?.background) return n.style.background as string;
+
+ return '#fff';
+ }}
+ nodeBorderRadius={2}
+ />
+
+
+
+
+
+ );
+};
diff --git a/web/vtadmin/src/components/routes/topology/Nodes.tsx b/web/vtadmin/src/components/routes/topology/Nodes.tsx
new file mode 100644
index 00000000000..ab4431da950
--- /dev/null
+++ b/web/vtadmin/src/components/routes/topology/Nodes.tsx
@@ -0,0 +1,129 @@
+/**
+ * Copyright 2022 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import React from 'react';
+import { MarkerType, Node, Edge } from 'react-flow-renderer';
+
+export interface TopologyCell {
+ name?: string;
+ data?: string;
+ path: string;
+ children?: TopologyCellChild[];
+}
+
+export type TopologyCellChild = string | TopologyCell;
+
+export const generateGraph = (
+ topology: { cell: TopologyCellChild },
+ onExpand: (path: string) => void
+): { nodes: Array; edges: Array } => {
+ return getNodesAndEdges(topology.cell as TopologyCell, '', -1, 0, onExpand);
+};
+
+const getNodesAndEdges = (
+ cell: TopologyCellChild,
+ path: string,
+ depth: number,
+ width: number,
+ onExpand: (path: string) => void
+): { nodes: Array; edges: Array } => {
+ const isCell = typeof cell !== 'string';
+ const isString = !isCell;
+ const nodes: Array = [];
+ const edges: Array = [];
+ if (isString || cell?.name) {
+ const parentNode: Node = {
+ id: path,
+ position: { y: depth * 100, x: width * 150 },
+ style: { width: 'min-content' },
+ data: {
+ label:
+ isCell && cell?.data ? (
+
+
{cell.name}
+
+ {cell.data}
+
+
+ ) : (
+
+ {typeof cell === 'string' ? cell : cell.name}
+ onExpand(path)} className="btn btn-secondary btn-sm mt-1">
+ Expand
+
+
+ ),
+ },
+ };
+
+ if (depth === 0) {
+ parentNode.type = 'input';
+ }
+
+ if (isCell && !cell?.children) {
+ parentNode.type = 'output';
+ }
+
+ nodes.push(parentNode);
+ }
+
+ if (isCell && cell?.children) {
+ let offset = 0;
+ cell.children.forEach((child, i) => {
+ const childPath = `${path}/${typeof child == 'string' ? child : child.name}`;
+ if (path !== '') {
+ edges.push({
+ id: `${path}-${childPath}`,
+ source: path,
+ target: childPath,
+ markerEnd: {
+ type: MarkerType.ArrowClosed,
+ },
+ });
+ }
+
+ const { nodes: childNodes, edges: childEdges } = getNodesAndEdges(
+ child,
+ childPath,
+ depth + 1,
+ width + offset,
+ onExpand
+ );
+ nodes.push(...childNodes);
+ edges.push(...childEdges);
+ offset += maxWidth(child);
+ });
+ }
+
+ return {
+ nodes,
+ edges,
+ };
+};
+
+const maxWidth = (cell: TopologyCellChild): number => {
+ let width = 0;
+
+ if (typeof cell == 'string' || !cell.children || cell.children?.length === 0) {
+ return 1;
+ }
+
+ cell.children?.forEach((child) => {
+ const childWidth = maxWidth(child);
+ width += childWidth;
+ });
+
+ return width;
+};
diff --git a/web/vtadmin/src/components/routes/topology/Topology.tsx b/web/vtadmin/src/components/routes/topology/Topology.tsx
new file mode 100644
index 00000000000..b4a65c3e8df
--- /dev/null
+++ b/web/vtadmin/src/components/routes/topology/Topology.tsx
@@ -0,0 +1,73 @@
+/**
+ * Copyright 2022 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { orderBy } from 'lodash-es';
+import * as React from 'react';
+
+import { useClusters } from '../../../hooks/api';
+import { useDocumentTitle } from '../../../hooks/useDocumentTitle';
+import { DataTable } from '../../dataTable/DataTable';
+import { vtadmin as pb } from '../../../proto/vtadmin';
+import { DataCell } from '../../dataTable/DataCell';
+import { ContentContainer } from '../../layout/ContentContainer';
+import { WorkspaceHeader } from '../../layout/WorkspaceHeader';
+import { WorkspaceTitle } from '../../layout/WorkspaceTitle';
+import { Link } from 'react-router-dom';
+
+const TopologyLink: React.FC<{ clusterID: string }> = ({ clusterID, children }) => {
+ const to = {
+ pathname: `/topology/${clusterID}`,
+ };
+
+ return (
+
+ {children}
+
+ );
+};
+export const Topology = () => {
+ useDocumentTitle('Topology');
+ const { data } = useClusters();
+
+ const rows = React.useMemo(() => {
+ return orderBy(data, ['name']);
+ }, [data]);
+
+ const renderRows = (rows: pb.Cluster[]) =>
+ rows.map((cluster, idx) => (
+
+ {cluster.name}
+ {cluster.id}
+
+ View Topology
+
+
+ ));
+
+ return (
+
+ );
+};
diff --git a/web/vtadmin/src/hooks/api.ts b/web/vtadmin/src/hooks/api.ts
index 5a593faec73..62ec56c8377 100644
--- a/web/vtadmin/src/hooks/api.ts
+++ b/web/vtadmin/src/hooks/api.ts
@@ -68,8 +68,18 @@ import {
rebuildKeyspaceGraph,
removeKeyspaceCell,
createShard,
+ validate,
+ ValidateParams,
+ validateShard,
+ ValidateShardParams,
+ getFullStatus,
+ GetFullStatusParams,
+ validateVersionShard,
+ ValidateVersionShardParams,
+ GetTopologyPathParams,
+ getTopologyPath,
} from '../api/http';
-import { vtadmin as pb } from '../proto/vtadmin';
+import { vtadmin as pb, vtctldata } from '../proto/vtadmin';
import { formatAlias } from '../util/tablets';
/**
@@ -554,3 +564,58 @@ export const useCreateShard = (
return createShard(params);
}, options);
};
+
+/**
+ * useValidate is a mutate hook that validates that all nodes reachable from the global replication graph,
+ * as well as all tablets in discoverable cells, are consistent.
+ */
+export const useValidate = (
+ params: Parameters[0],
+ options?: UseMutationOptions>, Error, ValidateParams>
+) => {
+ return useMutation>, Error, ValidateParams>(() => {
+ return validate(params);
+ }, options);
+};
+
+/**
+ * useValidateShard is a mutate hook that validates that that all nodes
+ * reachable from the specified shard are consistent.
+ */
+export const useValidateShard = (
+ params: Parameters[0],
+ options?: UseMutationOptions>, Error, ValidateShardParams>
+) => {
+ return useMutation>, Error, ValidateShardParams>(() => {
+ return validateShard(params);
+ }, options);
+};
+
+/**
+ * useGetFullStatus is a query hook that fetches the full status of a tablet
+ */
+export const useGetFullStatus = (
+ params: GetFullStatusParams,
+ options?: UseQueryOptions | undefined
+) => useQuery(['full-status', params], () => getFullStatus(params), options);
+
+/**
+ * useValidateVersionShard is a mutate hook that validates that the version on the primary matches all of the replicas.
+ */
+export const useValidateVersionShard = (
+ params: Parameters[0],
+ options?: UseMutationOptions>, Error, ValidateVersionShardParams>
+) => {
+ return useMutation>, Error, ValidateVersionShardParams>(() => {
+ return validateVersionShard(params);
+ }, options);
+};
+/*
+ * useTopologyPath is a query hook that fetches a cell at the specified path in the topology server.
+ */
+export const useTopologyPath = (
+ params: GetTopologyPathParams,
+ options?: UseQueryOptions | undefined
+) => {
+ return useQuery(['topology-path', params], () => getTopologyPath(params));
+};
diff --git a/web/vtadmin/src/icons/index.tsx b/web/vtadmin/src/icons/index.tsx
index 60841f9bf94..473ee34c1f0 100644
--- a/web/vtadmin/src/icons/index.tsx
+++ b/web/vtadmin/src/icons/index.tsx
@@ -11,3 +11,4 @@ export { ReactComponent as Open } from './open.svg';
export { ReactComponent as Question } from './question.svg';
export { ReactComponent as RunQuery } from './runQuery.svg';
export { ReactComponent as Search } from './search.svg';
+export { ReactComponent as Topology } from './topology.svg';
diff --git a/web/vtadmin/src/icons/topology.svg b/web/vtadmin/src/icons/topology.svg
new file mode 100644
index 00000000000..99034434210
--- /dev/null
+++ b/web/vtadmin/src/icons/topology.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts
index ef5ad5b468b..61a33684a72 100644
--- a/web/vtadmin/src/proto/vtadmin.d.ts
+++ b/web/vtadmin/src/proto/vtadmin.d.ts
@@ -176,6 +176,20 @@ export namespace vtadmin {
*/
public getClusters(request: vtadmin.IGetClustersRequest): Promise;
+ /**
+ * Calls GetFullStatus.
+ * @param request GetFullStatusRequest message or plain object
+ * @param callback Node-style callback called with the error, if any, and GetFullStatusResponse
+ */
+ public getFullStatus(request: vtadmin.IGetFullStatusRequest, callback: vtadmin.VTAdmin.GetFullStatusCallback): void;
+
+ /**
+ * Calls GetFullStatus.
+ * @param request GetFullStatusRequest message or plain object
+ * @returns Promise
+ */
+ public getFullStatus(request: vtadmin.IGetFullStatusRequest): Promise;
+
/**
* Calls GetGates.
* @param request GetGatesRequest message or plain object
@@ -316,6 +330,20 @@ export namespace vtadmin {
*/
public getTablets(request: vtadmin.IGetTabletsRequest): Promise;
+ /**
+ * Calls GetTopologyPath.
+ * @param request GetTopologyPathRequest message or plain object
+ * @param callback Node-style callback called with the error, if any, and GetTopologyPathResponse
+ */
+ public getTopologyPath(request: vtadmin.IGetTopologyPathRequest, callback: vtadmin.VTAdmin.GetTopologyPathCallback): void;
+
+ /**
+ * Calls GetTopologyPath.
+ * @param request GetTopologyPathRequest message or plain object
+ * @returns Promise
+ */
+ public getTopologyPath(request: vtadmin.IGetTopologyPathRequest): Promise;
+
/**
* Calls GetVSchema.
* @param request GetVSchemaRequest message or plain object
@@ -582,6 +610,20 @@ export namespace vtadmin {
*/
public tabletExternallyPromoted(request: vtadmin.ITabletExternallyPromotedRequest): Promise;
+ /**
+ * Calls Validate.
+ * @param request ValidateRequest message or plain object
+ * @param callback Node-style callback called with the error, if any, and ValidateResponse
+ */
+ public validate(request: vtadmin.IValidateRequest, callback: vtadmin.VTAdmin.ValidateCallback): void;
+
+ /**
+ * Calls Validate.
+ * @param request ValidateRequest message or plain object
+ * @returns Promise
+ */
+ public validate(request: vtadmin.IValidateRequest): Promise;
+
/**
* Calls ValidateKeyspace.
* @param request ValidateKeyspaceRequest message or plain object
@@ -610,6 +652,20 @@ export namespace vtadmin {
*/
public validateSchemaKeyspace(request: vtadmin.IValidateSchemaKeyspaceRequest): Promise;
+ /**
+ * Calls ValidateShard.
+ * @param request ValidateShardRequest message or plain object
+ * @param callback Node-style callback called with the error, if any, and ValidateShardResponse
+ */
+ public validateShard(request: vtadmin.IValidateShardRequest, callback: vtadmin.VTAdmin.ValidateShardCallback): void;
+
+ /**
+ * Calls ValidateShard.
+ * @param request ValidateShardRequest message or plain object
+ * @returns Promise
+ */
+ public validateShard(request: vtadmin.IValidateShardRequest): Promise;
+
/**
* Calls ValidateVersionKeyspace.
* @param request ValidateVersionKeyspaceRequest message or plain object
@@ -624,6 +680,20 @@ export namespace vtadmin {
*/
public validateVersionKeyspace(request: vtadmin.IValidateVersionKeyspaceRequest): Promise;
+ /**
+ * Calls ValidateVersionShard.
+ * @param request ValidateVersionShardRequest message or plain object
+ * @param callback Node-style callback called with the error, if any, and ValidateVersionShardResponse
+ */
+ public validateVersionShard(request: vtadmin.IValidateVersionShardRequest, callback: vtadmin.VTAdmin.ValidateVersionShardCallback): void;
+
+ /**
+ * Calls ValidateVersionShard.
+ * @param request ValidateVersionShardRequest message or plain object
+ * @returns Promise
+ */
+ public validateVersionShard(request: vtadmin.IValidateVersionShardRequest): Promise;
+
/**
* Calls VTExplain.
* @param request VTExplainRequest message or plain object
@@ -718,6 +788,13 @@ export namespace vtadmin {
*/
type GetClustersCallback = (error: (Error|null), response?: vtadmin.GetClustersResponse) => void;
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#getFullStatus}.
+ * @param error Error, if any
+ * @param [response] GetFullStatusResponse
+ */
+ type GetFullStatusCallback = (error: (Error|null), response?: vtctldata.GetFullStatusResponse) => void;
+
/**
* Callback as used by {@link vtadmin.VTAdmin#getGates}.
* @param error Error, if any
@@ -788,6 +865,13 @@ export namespace vtadmin {
*/
type GetTabletsCallback = (error: (Error|null), response?: vtadmin.GetTabletsResponse) => void;
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#getTopologyPath}.
+ * @param error Error, if any
+ * @param [response] GetTopologyPathResponse
+ */
+ type GetTopologyPathCallback = (error: (Error|null), response?: vtctldata.GetTopologyPathResponse) => void;
+
/**
* Callback as used by {@link vtadmin.VTAdmin#getVSchema}.
* @param error Error, if any
@@ -921,6 +1005,13 @@ export namespace vtadmin {
*/
type TabletExternallyPromotedCallback = (error: (Error|null), response?: vtadmin.TabletExternallyPromotedResponse) => void;
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#validate}.
+ * @param error Error, if any
+ * @param [response] ValidateResponse
+ */
+ type ValidateCallback = (error: (Error|null), response?: vtctldata.ValidateResponse) => void;
+
/**
* Callback as used by {@link vtadmin.VTAdmin#validateKeyspace}.
* @param error Error, if any
@@ -935,6 +1026,13 @@ export namespace vtadmin {
*/
type ValidateSchemaKeyspaceCallback = (error: (Error|null), response?: vtctldata.ValidateSchemaKeyspaceResponse) => void;
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#validateShard}.
+ * @param error Error, if any
+ * @param [response] ValidateShardResponse
+ */
+ type ValidateShardCallback = (error: (Error|null), response?: vtctldata.ValidateShardResponse) => void;
+
/**
* Callback as used by {@link vtadmin.VTAdmin#validateVersionKeyspace}.
* @param error Error, if any
@@ -942,6 +1040,13 @@ export namespace vtadmin {
*/
type ValidateVersionKeyspaceCallback = (error: (Error|null), response?: vtctldata.ValidateVersionKeyspaceResponse) => void;
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#validateVersionShard}.
+ * @param error Error, if any
+ * @param [response] ValidateVersionShardResponse
+ */
+ type ValidateVersionShardCallback = (error: (Error|null), response?: vtctldata.ValidateVersionShardResponse) => void;
+
/**
* Callback as used by {@link vtadmin.VTAdmin#vTExplain}.
* @param error Error, if any
@@ -4425,6 +4530,102 @@ export namespace vtadmin {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a GetFullStatusRequest. */
+ interface IGetFullStatusRequest {
+
+ /** GetFullStatusRequest cluster_id */
+ cluster_id?: (string|null);
+
+ /** GetFullStatusRequest alias */
+ alias?: (topodata.ITabletAlias|null);
+ }
+
+ /** Represents a GetFullStatusRequest. */
+ class GetFullStatusRequest implements IGetFullStatusRequest {
+
+ /**
+ * Constructs a new GetFullStatusRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtadmin.IGetFullStatusRequest);
+
+ /** GetFullStatusRequest cluster_id. */
+ public cluster_id: string;
+
+ /** GetFullStatusRequest alias. */
+ public alias?: (topodata.ITabletAlias|null);
+
+ /**
+ * Creates a new GetFullStatusRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns GetFullStatusRequest instance
+ */
+ public static create(properties?: vtadmin.IGetFullStatusRequest): vtadmin.GetFullStatusRequest;
+
+ /**
+ * Encodes the specified GetFullStatusRequest message. Does not implicitly {@link vtadmin.GetFullStatusRequest.verify|verify} messages.
+ * @param message GetFullStatusRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtadmin.IGetFullStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified GetFullStatusRequest message, length delimited. Does not implicitly {@link vtadmin.GetFullStatusRequest.verify|verify} messages.
+ * @param message GetFullStatusRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtadmin.IGetFullStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a GetFullStatusRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns GetFullStatusRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.GetFullStatusRequest;
+
+ /**
+ * Decodes a GetFullStatusRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns GetFullStatusRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.GetFullStatusRequest;
+
+ /**
+ * Verifies a GetFullStatusRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a GetFullStatusRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns GetFullStatusRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtadmin.GetFullStatusRequest;
+
+ /**
+ * Creates a plain object from a GetFullStatusRequest message. Also converts values to other types if specified.
+ * @param message GetFullStatusRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtadmin.GetFullStatusRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this GetFullStatusRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a GetGatesRequest. */
interface IGetGatesRequest {
@@ -6021,6 +6222,102 @@ export namespace vtadmin {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a GetTopologyPathRequest. */
+ interface IGetTopologyPathRequest {
+
+ /** GetTopologyPathRequest cluster_id */
+ cluster_id?: (string|null);
+
+ /** GetTopologyPathRequest path */
+ path?: (string|null);
+ }
+
+ /** Represents a GetTopologyPathRequest. */
+ class GetTopologyPathRequest implements IGetTopologyPathRequest {
+
+ /**
+ * Constructs a new GetTopologyPathRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtadmin.IGetTopologyPathRequest);
+
+ /** GetTopologyPathRequest cluster_id. */
+ public cluster_id: string;
+
+ /** GetTopologyPathRequest path. */
+ public path: string;
+
+ /**
+ * Creates a new GetTopologyPathRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns GetTopologyPathRequest instance
+ */
+ public static create(properties?: vtadmin.IGetTopologyPathRequest): vtadmin.GetTopologyPathRequest;
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtadmin.GetTopologyPathRequest.verify|verify} messages.
+ * @param message GetTopologyPathRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtadmin.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtadmin.GetTopologyPathRequest.verify|verify} messages.
+ * @param message GetTopologyPathRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtadmin.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.GetTopologyPathRequest;
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.GetTopologyPathRequest;
+
+ /**
+ * Verifies a GetTopologyPathRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns GetTopologyPathRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtadmin.GetTopologyPathRequest;
+
+ /**
+ * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified.
+ * @param message GetTopologyPathRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtadmin.GetTopologyPathRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this GetTopologyPathRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a GetVSchemaRequest. */
interface IGetVSchemaRequest {
@@ -9954,6 +10251,102 @@ export namespace vtadmin {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a ValidateRequest. */
+ interface IValidateRequest {
+
+ /** ValidateRequest cluster_id */
+ cluster_id?: (string|null);
+
+ /** ValidateRequest ping_tablets */
+ ping_tablets?: (boolean|null);
+ }
+
+ /** Represents a ValidateRequest. */
+ class ValidateRequest implements IValidateRequest {
+
+ /**
+ * Constructs a new ValidateRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtadmin.IValidateRequest);
+
+ /** ValidateRequest cluster_id. */
+ public cluster_id: string;
+
+ /** ValidateRequest ping_tablets. */
+ public ping_tablets: boolean;
+
+ /**
+ * Creates a new ValidateRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns ValidateRequest instance
+ */
+ public static create(properties?: vtadmin.IValidateRequest): vtadmin.ValidateRequest;
+
+ /**
+ * Encodes the specified ValidateRequest message. Does not implicitly {@link vtadmin.ValidateRequest.verify|verify} messages.
+ * @param message ValidateRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtadmin.IValidateRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified ValidateRequest message, length delimited. Does not implicitly {@link vtadmin.ValidateRequest.verify|verify} messages.
+ * @param message ValidateRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtadmin.IValidateRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a ValidateRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns ValidateRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.ValidateRequest;
+
+ /**
+ * Decodes a ValidateRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns ValidateRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.ValidateRequest;
+
+ /**
+ * Verifies a ValidateRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a ValidateRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns ValidateRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtadmin.ValidateRequest;
+
+ /**
+ * Creates a plain object from a ValidateRequest message. Also converts values to other types if specified.
+ * @param message ValidateRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtadmin.ValidateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this ValidateRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a ValidateKeyspaceRequest. */
interface IValidateKeyspaceRequest {
@@ -10152,6 +10545,114 @@ export namespace vtadmin {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a ValidateShardRequest. */
+ interface IValidateShardRequest {
+
+ /** ValidateShardRequest cluster_id */
+ cluster_id?: (string|null);
+
+ /** ValidateShardRequest keyspace */
+ keyspace?: (string|null);
+
+ /** ValidateShardRequest shard */
+ shard?: (string|null);
+
+ /** ValidateShardRequest ping_tablets */
+ ping_tablets?: (boolean|null);
+ }
+
+ /** Represents a ValidateShardRequest. */
+ class ValidateShardRequest implements IValidateShardRequest {
+
+ /**
+ * Constructs a new ValidateShardRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtadmin.IValidateShardRequest);
+
+ /** ValidateShardRequest cluster_id. */
+ public cluster_id: string;
+
+ /** ValidateShardRequest keyspace. */
+ public keyspace: string;
+
+ /** ValidateShardRequest shard. */
+ public shard: string;
+
+ /** ValidateShardRequest ping_tablets. */
+ public ping_tablets: boolean;
+
+ /**
+ * Creates a new ValidateShardRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns ValidateShardRequest instance
+ */
+ public static create(properties?: vtadmin.IValidateShardRequest): vtadmin.ValidateShardRequest;
+
+ /**
+ * Encodes the specified ValidateShardRequest message. Does not implicitly {@link vtadmin.ValidateShardRequest.verify|verify} messages.
+ * @param message ValidateShardRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtadmin.IValidateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified ValidateShardRequest message, length delimited. Does not implicitly {@link vtadmin.ValidateShardRequest.verify|verify} messages.
+ * @param message ValidateShardRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtadmin.IValidateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a ValidateShardRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns ValidateShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.ValidateShardRequest;
+
+ /**
+ * Decodes a ValidateShardRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns ValidateShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.ValidateShardRequest;
+
+ /**
+ * Verifies a ValidateShardRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a ValidateShardRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns ValidateShardRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtadmin.ValidateShardRequest;
+
+ /**
+ * Creates a plain object from a ValidateShardRequest message. Also converts values to other types if specified.
+ * @param message ValidateShardRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtadmin.ValidateShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this ValidateShardRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a ValidateVersionKeyspaceRequest. */
interface IValidateVersionKeyspaceRequest {
@@ -10248,6 +10749,108 @@ export namespace vtadmin {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a ValidateVersionShardRequest. */
+ interface IValidateVersionShardRequest {
+
+ /** ValidateVersionShardRequest cluster_id */
+ cluster_id?: (string|null);
+
+ /** ValidateVersionShardRequest keyspace */
+ keyspace?: (string|null);
+
+ /** ValidateVersionShardRequest shard */
+ shard?: (string|null);
+ }
+
+ /** Represents a ValidateVersionShardRequest. */
+ class ValidateVersionShardRequest implements IValidateVersionShardRequest {
+
+ /**
+ * Constructs a new ValidateVersionShardRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtadmin.IValidateVersionShardRequest);
+
+ /** ValidateVersionShardRequest cluster_id. */
+ public cluster_id: string;
+
+ /** ValidateVersionShardRequest keyspace. */
+ public keyspace: string;
+
+ /** ValidateVersionShardRequest shard. */
+ public shard: string;
+
+ /**
+ * Creates a new ValidateVersionShardRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns ValidateVersionShardRequest instance
+ */
+ public static create(properties?: vtadmin.IValidateVersionShardRequest): vtadmin.ValidateVersionShardRequest;
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtadmin.ValidateVersionShardRequest.verify|verify} messages.
+ * @param message ValidateVersionShardRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtadmin.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtadmin.ValidateVersionShardRequest.verify|verify} messages.
+ * @param message ValidateVersionShardRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtadmin.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.ValidateVersionShardRequest;
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.ValidateVersionShardRequest;
+
+ /**
+ * Verifies a ValidateVersionShardRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns ValidateVersionShardRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtadmin.ValidateVersionShardRequest;
+
+ /**
+ * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified.
+ * @param message ValidateVersionShardRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtadmin.ValidateVersionShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this ValidateVersionShardRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a VTExplainRequest. */
interface IVTExplainRequest {
@@ -22480,6 +23083,9 @@ export namespace tabletmanagerdata {
/** BackupRequest allow_primary */
allow_primary?: (boolean|null);
+
+ /** BackupRequest backup_engine */
+ backup_engine?: (string|null);
}
/** Represents a BackupRequest. */
@@ -22497,6 +23103,12 @@ export namespace tabletmanagerdata {
/** BackupRequest allow_primary. */
public allow_primary: boolean;
+ /** BackupRequest backup_engine. */
+ public backup_engine?: (string|null);
+
+ /** BackupRequest _backup_engine. */
+ public _backup_engine?: "backup_engine";
+
/**
* Creates a new BackupRequest instance using the specified properties.
* @param [properties] Properties to set
@@ -22663,6 +23275,9 @@ export namespace tabletmanagerdata {
/** RestoreFromBackupRequest backup_time */
backup_time?: (vttime.ITime|null);
+
+ /** RestoreFromBackupRequest allowed_backup_engines */
+ allowed_backup_engines?: (string[]|null);
}
/** Represents a RestoreFromBackupRequest. */
@@ -22677,6 +23292,9 @@ export namespace tabletmanagerdata {
/** RestoreFromBackupRequest backup_time. */
public backup_time?: (vttime.ITime|null);
+ /** RestoreFromBackupRequest allowed_backup_engines. */
+ public allowed_backup_engines: string[];
+
/**
* Creates a new RestoreFromBackupRequest instance using the specified properties.
* @param [properties] Properties to set
@@ -24388,6 +25006,12 @@ export namespace query {
/** ExecuteOptions has_created_temp_tables */
has_created_temp_tables?: (boolean|null);
+
+ /** ExecuteOptions WorkloadName */
+ WorkloadName?: (string|null);
+
+ /** ExecuteOptions priority */
+ priority?: (string|null);
}
/** Represents an ExecuteOptions. */
@@ -24423,6 +25047,12 @@ export namespace query {
/** ExecuteOptions has_created_temp_tables. */
public has_created_temp_tables: boolean;
+ /** ExecuteOptions WorkloadName. */
+ public WorkloadName: string;
+
+ /** ExecuteOptions priority. */
+ public priority: string;
+
/**
* Creates a new ExecuteOptions instance using the specified properties.
* @param [properties] Properties to set
@@ -30754,6 +31384,12 @@ export namespace replicationdata {
/** Status position */
position?: (string|null);
+ /** Status io_thread_running */
+ io_thread_running?: (boolean|null);
+
+ /** Status sql_thread_running */
+ sql_thread_running?: (boolean|null);
+
/** Status replication_lag_seconds */
replication_lag_seconds?: (number|null);
@@ -30816,6 +31452,9 @@ export namespace replicationdata {
/** Status replication_lag_unknown */
replication_lag_unknown?: (boolean|null);
+
+ /** Status backup_running */
+ backup_running?: (boolean|null);
}
/** Represents a Status. */
@@ -30830,6 +31469,12 @@ export namespace replicationdata {
/** Status position. */
public position: string;
+ /** Status io_thread_running. */
+ public io_thread_running: boolean;
+
+ /** Status sql_thread_running. */
+ public sql_thread_running: boolean;
+
/** Status replication_lag_seconds. */
public replication_lag_seconds: number;
@@ -30893,6 +31538,9 @@ export namespace replicationdata {
/** Status replication_lag_unknown. */
public replication_lag_unknown: boolean;
+ /** Status backup_running. */
+ public backup_running: boolean;
+
/**
* Creates a new Status instance using the specified properties.
* @param [properties] Properties to set
@@ -35028,6 +35676,9 @@ export namespace vtctldata {
/** BackupRequest concurrency */
concurrency?: (number|Long|null);
+
+ /** BackupRequest backup_engine */
+ backup_engine?: (string|null);
}
/** Represents a BackupRequest. */
@@ -35048,6 +35699,12 @@ export namespace vtctldata {
/** BackupRequest concurrency. */
public concurrency: (number|Long);
+ /** BackupRequest backup_engine. */
+ public backup_engine?: (string|null);
+
+ /** BackupRequest _backup_engine. */
+ public _backup_engine?: "backup_engine";
+
/**
* Creates a new BackupRequest instance using the specified properties.
* @param [properties] Properties to set
@@ -37077,6 +37734,9 @@ export namespace vtctldata {
/** EmergencyReparentShardRequest prevent_cross_cell_promotion */
prevent_cross_cell_promotion?: (boolean|null);
+
+ /** EmergencyReparentShardRequest expected_primary */
+ expected_primary?: (topodata.ITabletAlias|null);
}
/** Represents an EmergencyReparentShardRequest. */
@@ -37106,6 +37766,9 @@ export namespace vtctldata {
/** EmergencyReparentShardRequest prevent_cross_cell_promotion. */
public prevent_cross_cell_promotion: boolean;
+ /** EmergencyReparentShardRequest expected_primary. */
+ public expected_primary?: (topodata.ITabletAlias|null);
+
/**
* Creates a new EmergencyReparentShardRequest instance using the specified properties.
* @param [properties] Properties to set
@@ -41458,6 +42121,294 @@ export namespace vtctldata {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a GetTopologyPathRequest. */
+ interface IGetTopologyPathRequest {
+
+ /** GetTopologyPathRequest path */
+ path?: (string|null);
+ }
+
+ /** Represents a GetTopologyPathRequest. */
+ class GetTopologyPathRequest implements IGetTopologyPathRequest {
+
+ /**
+ * Constructs a new GetTopologyPathRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtctldata.IGetTopologyPathRequest);
+
+ /** GetTopologyPathRequest path. */
+ public path: string;
+
+ /**
+ * Creates a new GetTopologyPathRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns GetTopologyPathRequest instance
+ */
+ public static create(properties?: vtctldata.IGetTopologyPathRequest): vtctldata.GetTopologyPathRequest;
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages.
+ * @param message GetTopologyPathRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtctldata.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages.
+ * @param message GetTopologyPathRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtctldata.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTopologyPathRequest;
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTopologyPathRequest;
+
+ /**
+ * Verifies a GetTopologyPathRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns GetTopologyPathRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtctldata.GetTopologyPathRequest;
+
+ /**
+ * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified.
+ * @param message GetTopologyPathRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtctldata.GetTopologyPathRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this GetTopologyPathRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
+ /** Properties of a GetTopologyPathResponse. */
+ interface IGetTopologyPathResponse {
+
+ /** GetTopologyPathResponse cell */
+ cell?: (vtctldata.ITopologyCell|null);
+ }
+
+ /** Represents a GetTopologyPathResponse. */
+ class GetTopologyPathResponse implements IGetTopologyPathResponse {
+
+ /**
+ * Constructs a new GetTopologyPathResponse.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtctldata.IGetTopologyPathResponse);
+
+ /** GetTopologyPathResponse cell. */
+ public cell?: (vtctldata.ITopologyCell|null);
+
+ /**
+ * Creates a new GetTopologyPathResponse instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns GetTopologyPathResponse instance
+ */
+ public static create(properties?: vtctldata.IGetTopologyPathResponse): vtctldata.GetTopologyPathResponse;
+
+ /**
+ * Encodes the specified GetTopologyPathResponse message. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages.
+ * @param message GetTopologyPathResponse message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtctldata.IGetTopologyPathResponse, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified GetTopologyPathResponse message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages.
+ * @param message GetTopologyPathResponse message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtctldata.IGetTopologyPathResponse, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a GetTopologyPathResponse message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns GetTopologyPathResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTopologyPathResponse;
+
+ /**
+ * Decodes a GetTopologyPathResponse message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns GetTopologyPathResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTopologyPathResponse;
+
+ /**
+ * Verifies a GetTopologyPathResponse message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a GetTopologyPathResponse message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns GetTopologyPathResponse
+ */
+ public static fromObject(object: { [k: string]: any }): vtctldata.GetTopologyPathResponse;
+
+ /**
+ * Creates a plain object from a GetTopologyPathResponse message. Also converts values to other types if specified.
+ * @param message GetTopologyPathResponse
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtctldata.GetTopologyPathResponse, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this GetTopologyPathResponse to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
+ /** Properties of a TopologyCell. */
+ interface ITopologyCell {
+
+ /** TopologyCell name */
+ name?: (string|null);
+
+ /** TopologyCell path */
+ path?: (string|null);
+
+ /** TopologyCell data */
+ data?: (string|null);
+
+ /** TopologyCell children */
+ children?: (string[]|null);
+ }
+
+ /** Represents a TopologyCell. */
+ class TopologyCell implements ITopologyCell {
+
+ /**
+ * Constructs a new TopologyCell.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtctldata.ITopologyCell);
+
+ /** TopologyCell name. */
+ public name: string;
+
+ /** TopologyCell path. */
+ public path: string;
+
+ /** TopologyCell data. */
+ public data: string;
+
+ /** TopologyCell children. */
+ public children: string[];
+
+ /**
+ * Creates a new TopologyCell instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns TopologyCell instance
+ */
+ public static create(properties?: vtctldata.ITopologyCell): vtctldata.TopologyCell;
+
+ /**
+ * Encodes the specified TopologyCell message. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages.
+ * @param message TopologyCell message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtctldata.ITopologyCell, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified TopologyCell message, length delimited. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages.
+ * @param message TopologyCell message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtctldata.ITopologyCell, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a TopologyCell message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns TopologyCell
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TopologyCell;
+
+ /**
+ * Decodes a TopologyCell message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns TopologyCell
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TopologyCell;
+
+ /**
+ * Verifies a TopologyCell message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a TopologyCell message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns TopologyCell
+ */
+ public static fromObject(object: { [k: string]: any }): vtctldata.TopologyCell;
+
+ /**
+ * Creates a plain object from a TopologyCell message. Also converts values to other types if specified.
+ * @param message TopologyCell
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtctldata.TopologyCell, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this TopologyCell to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a GetVSchemaRequest. */
interface IGetVSchemaRequest {
@@ -42399,6 +43350,9 @@ export namespace vtctldata {
/** PlannedReparentShardRequest wait_replicas_timeout */
wait_replicas_timeout?: (vttime.IDuration|null);
+
+ /** PlannedReparentShardRequest expected_primary */
+ expected_primary?: (topodata.ITabletAlias|null);
}
/** Represents a PlannedReparentShardRequest. */
@@ -42425,6 +43379,9 @@ export namespace vtctldata {
/** PlannedReparentShardRequest wait_replicas_timeout. */
public wait_replicas_timeout?: (vttime.IDuration|null);
+ /** PlannedReparentShardRequest expected_primary. */
+ public expected_primary?: (topodata.ITabletAlias|null);
+
/**
* Creates a new PlannedReparentShardRequest instance using the specified properties.
* @param [properties] Properties to set
@@ -44688,6 +45645,9 @@ export namespace vtctldata {
/** RestoreFromBackupRequest backup_time */
backup_time?: (vttime.ITime|null);
+
+ /** RestoreFromBackupRequest allowed_backup_engines */
+ allowed_backup_engines?: (string[]|null);
}
/** Represents a RestoreFromBackupRequest. */
@@ -44705,6 +45665,9 @@ export namespace vtctldata {
/** RestoreFromBackupRequest backup_time. */
public backup_time?: (vttime.ITime|null);
+ /** RestoreFromBackupRequest allowed_backup_engines. */
+ public allowed_backup_engines: string[];
+
/**
* Creates a new RestoreFromBackupRequest instance using the specified properties.
* @param [properties] Properties to set
@@ -49462,6 +50425,192 @@ export namespace vtctldata {
public toJSON(): { [k: string]: any };
}
+ /** Properties of a ValidateVersionShardRequest. */
+ interface IValidateVersionShardRequest {
+
+ /** ValidateVersionShardRequest keyspace */
+ keyspace?: (string|null);
+
+ /** ValidateVersionShardRequest shard */
+ shard?: (string|null);
+ }
+
+ /** Represents a ValidateVersionShardRequest. */
+ class ValidateVersionShardRequest implements IValidateVersionShardRequest {
+
+ /**
+ * Constructs a new ValidateVersionShardRequest.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtctldata.IValidateVersionShardRequest);
+
+ /** ValidateVersionShardRequest keyspace. */
+ public keyspace: string;
+
+ /** ValidateVersionShardRequest shard. */
+ public shard: string;
+
+ /**
+ * Creates a new ValidateVersionShardRequest instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns ValidateVersionShardRequest instance
+ */
+ public static create(properties?: vtctldata.IValidateVersionShardRequest): vtctldata.ValidateVersionShardRequest;
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages.
+ * @param message ValidateVersionShardRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtctldata.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages.
+ * @param message ValidateVersionShardRequest message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtctldata.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionShardRequest;
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionShardRequest;
+
+ /**
+ * Verifies a ValidateVersionShardRequest message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns ValidateVersionShardRequest
+ */
+ public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionShardRequest;
+
+ /**
+ * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified.
+ * @param message ValidateVersionShardRequest
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtctldata.ValidateVersionShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this ValidateVersionShardRequest to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
+ /** Properties of a ValidateVersionShardResponse. */
+ interface IValidateVersionShardResponse {
+
+ /** ValidateVersionShardResponse results */
+ results?: (string[]|null);
+ }
+
+ /** Represents a ValidateVersionShardResponse. */
+ class ValidateVersionShardResponse implements IValidateVersionShardResponse {
+
+ /**
+ * Constructs a new ValidateVersionShardResponse.
+ * @param [properties] Properties to set
+ */
+ constructor(properties?: vtctldata.IValidateVersionShardResponse);
+
+ /** ValidateVersionShardResponse results. */
+ public results: string[];
+
+ /**
+ * Creates a new ValidateVersionShardResponse instance using the specified properties.
+ * @param [properties] Properties to set
+ * @returns ValidateVersionShardResponse instance
+ */
+ public static create(properties?: vtctldata.IValidateVersionShardResponse): vtctldata.ValidateVersionShardResponse;
+
+ /**
+ * Encodes the specified ValidateVersionShardResponse message. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages.
+ * @param message ValidateVersionShardResponse message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encode(message: vtctldata.IValidateVersionShardResponse, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Encodes the specified ValidateVersionShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages.
+ * @param message ValidateVersionShardResponse message or plain object to encode
+ * @param [writer] Writer to encode to
+ * @returns Writer
+ */
+ public static encodeDelimited(message: vtctldata.IValidateVersionShardResponse, writer?: $protobuf.Writer): $protobuf.Writer;
+
+ /**
+ * Decodes a ValidateVersionShardResponse message from the specified reader or buffer.
+ * @param reader Reader or buffer to decode from
+ * @param [length] Message length if known beforehand
+ * @returns ValidateVersionShardResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionShardResponse;
+
+ /**
+ * Decodes a ValidateVersionShardResponse message from the specified reader or buffer, length delimited.
+ * @param reader Reader or buffer to decode from
+ * @returns ValidateVersionShardResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionShardResponse;
+
+ /**
+ * Verifies a ValidateVersionShardResponse message.
+ * @param message Plain object to verify
+ * @returns `null` if valid, otherwise the reason why it is not
+ */
+ public static verify(message: { [k: string]: any }): (string|null);
+
+ /**
+ * Creates a ValidateVersionShardResponse message from a plain object. Also converts values to their respective internal types.
+ * @param object Plain object
+ * @returns ValidateVersionShardResponse
+ */
+ public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionShardResponse;
+
+ /**
+ * Creates a plain object from a ValidateVersionShardResponse message. Also converts values to other types if specified.
+ * @param message ValidateVersionShardResponse
+ * @param [options] Conversion options
+ * @returns Plain object
+ */
+ public static toObject(message: vtctldata.ValidateVersionShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any };
+
+ /**
+ * Converts this ValidateVersionShardResponse to JSON.
+ * @returns JSON object
+ */
+ public toJSON(): { [k: string]: any };
+ }
+
/** Properties of a ValidateVSchemaRequest. */
interface IValidateVSchemaRequest {
@@ -50914,7 +52063,8 @@ export namespace binlogdata {
JOURNAL = 16,
VERSION = 17,
LASTPK = 18,
- SAVEPOINT = 19
+ SAVEPOINT = 19,
+ COPY_COMPLETED = 20
}
/** Properties of a RowChange. */
diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js
index 4cd1ae36506..fb15a0e5ab4 100644
--- a/web/vtadmin/src/proto/vtadmin.js
+++ b/web/vtadmin/src/proto/vtadmin.js
@@ -413,6 +413,39 @@ $root.vtadmin = (function() {
* @variation 2
*/
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#getFullStatus}.
+ * @memberof vtadmin.VTAdmin
+ * @typedef GetFullStatusCallback
+ * @type {function}
+ * @param {Error|null} error Error, if any
+ * @param {vtctldata.GetFullStatusResponse} [response] GetFullStatusResponse
+ */
+
+ /**
+ * Calls GetFullStatus.
+ * @function getFullStatus
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IGetFullStatusRequest} request GetFullStatusRequest message or plain object
+ * @param {vtadmin.VTAdmin.GetFullStatusCallback} callback Node-style callback called with the error, if any, and GetFullStatusResponse
+ * @returns {undefined}
+ * @variation 1
+ */
+ Object.defineProperty(VTAdmin.prototype.getFullStatus = function getFullStatus(request, callback) {
+ return this.rpcCall(getFullStatus, $root.vtadmin.GetFullStatusRequest, $root.vtctldata.GetFullStatusResponse, request, callback);
+ }, "name", { value: "GetFullStatus" });
+
+ /**
+ * Calls GetFullStatus.
+ * @function getFullStatus
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IGetFullStatusRequest} request GetFullStatusRequest message or plain object
+ * @returns {Promise} Promise
+ * @variation 2
+ */
+
/**
* Callback as used by {@link vtadmin.VTAdmin#getGates}.
* @memberof vtadmin.VTAdmin
@@ -743,6 +776,39 @@ $root.vtadmin = (function() {
* @variation 2
*/
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#getTopologyPath}.
+ * @memberof vtadmin.VTAdmin
+ * @typedef GetTopologyPathCallback
+ * @type {function}
+ * @param {Error|null} error Error, if any
+ * @param {vtctldata.GetTopologyPathResponse} [response] GetTopologyPathResponse
+ */
+
+ /**
+ * Calls GetTopologyPath.
+ * @function getTopologyPath
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IGetTopologyPathRequest} request GetTopologyPathRequest message or plain object
+ * @param {vtadmin.VTAdmin.GetTopologyPathCallback} callback Node-style callback called with the error, if any, and GetTopologyPathResponse
+ * @returns {undefined}
+ * @variation 1
+ */
+ Object.defineProperty(VTAdmin.prototype.getTopologyPath = function getTopologyPath(request, callback) {
+ return this.rpcCall(getTopologyPath, $root.vtadmin.GetTopologyPathRequest, $root.vtctldata.GetTopologyPathResponse, request, callback);
+ }, "name", { value: "GetTopologyPath" });
+
+ /**
+ * Calls GetTopologyPath.
+ * @function getTopologyPath
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IGetTopologyPathRequest} request GetTopologyPathRequest message or plain object
+ * @returns {Promise} Promise
+ * @variation 2
+ */
+
/**
* Callback as used by {@link vtadmin.VTAdmin#getVSchema}.
* @memberof vtadmin.VTAdmin
@@ -1370,6 +1436,39 @@ $root.vtadmin = (function() {
* @variation 2
*/
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#validate}.
+ * @memberof vtadmin.VTAdmin
+ * @typedef ValidateCallback
+ * @type {function}
+ * @param {Error|null} error Error, if any
+ * @param {vtctldata.ValidateResponse} [response] ValidateResponse
+ */
+
+ /**
+ * Calls Validate.
+ * @function validate
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IValidateRequest} request ValidateRequest message or plain object
+ * @param {vtadmin.VTAdmin.ValidateCallback} callback Node-style callback called with the error, if any, and ValidateResponse
+ * @returns {undefined}
+ * @variation 1
+ */
+ Object.defineProperty(VTAdmin.prototype.validate = function validate(request, callback) {
+ return this.rpcCall(validate, $root.vtadmin.ValidateRequest, $root.vtctldata.ValidateResponse, request, callback);
+ }, "name", { value: "Validate" });
+
+ /**
+ * Calls Validate.
+ * @function validate
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IValidateRequest} request ValidateRequest message or plain object
+ * @returns {Promise} Promise
+ * @variation 2
+ */
+
/**
* Callback as used by {@link vtadmin.VTAdmin#validateKeyspace}.
* @memberof vtadmin.VTAdmin
@@ -1436,6 +1535,39 @@ $root.vtadmin = (function() {
* @variation 2
*/
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#validateShard}.
+ * @memberof vtadmin.VTAdmin
+ * @typedef ValidateShardCallback
+ * @type {function}
+ * @param {Error|null} error Error, if any
+ * @param {vtctldata.ValidateShardResponse} [response] ValidateShardResponse
+ */
+
+ /**
+ * Calls ValidateShard.
+ * @function validateShard
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IValidateShardRequest} request ValidateShardRequest message or plain object
+ * @param {vtadmin.VTAdmin.ValidateShardCallback} callback Node-style callback called with the error, if any, and ValidateShardResponse
+ * @returns {undefined}
+ * @variation 1
+ */
+ Object.defineProperty(VTAdmin.prototype.validateShard = function validateShard(request, callback) {
+ return this.rpcCall(validateShard, $root.vtadmin.ValidateShardRequest, $root.vtctldata.ValidateShardResponse, request, callback);
+ }, "name", { value: "ValidateShard" });
+
+ /**
+ * Calls ValidateShard.
+ * @function validateShard
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IValidateShardRequest} request ValidateShardRequest message or plain object
+ * @returns {Promise} Promise
+ * @variation 2
+ */
+
/**
* Callback as used by {@link vtadmin.VTAdmin#validateVersionKeyspace}.
* @memberof vtadmin.VTAdmin
@@ -1469,6 +1601,39 @@ $root.vtadmin = (function() {
* @variation 2
*/
+ /**
+ * Callback as used by {@link vtadmin.VTAdmin#validateVersionShard}.
+ * @memberof vtadmin.VTAdmin
+ * @typedef ValidateVersionShardCallback
+ * @type {function}
+ * @param {Error|null} error Error, if any
+ * @param {vtctldata.ValidateVersionShardResponse} [response] ValidateVersionShardResponse
+ */
+
+ /**
+ * Calls ValidateVersionShard.
+ * @function validateVersionShard
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IValidateVersionShardRequest} request ValidateVersionShardRequest message or plain object
+ * @param {vtadmin.VTAdmin.ValidateVersionShardCallback} callback Node-style callback called with the error, if any, and ValidateVersionShardResponse
+ * @returns {undefined}
+ * @variation 1
+ */
+ Object.defineProperty(VTAdmin.prototype.validateVersionShard = function validateVersionShard(request, callback) {
+ return this.rpcCall(validateVersionShard, $root.vtadmin.ValidateVersionShardRequest, $root.vtctldata.ValidateVersionShardResponse, request, callback);
+ }, "name", { value: "ValidateVersionShard" });
+
+ /**
+ * Calls ValidateVersionShard.
+ * @function validateVersionShard
+ * @memberof vtadmin.VTAdmin
+ * @instance
+ * @param {vtadmin.IValidateVersionShardRequest} request ValidateVersionShardRequest message or plain object
+ * @returns {Promise} Promise
+ * @variation 2
+ */
+
/**
* Callback as used by {@link vtadmin.VTAdmin#vTExplain}.
* @memberof vtadmin.VTAdmin
@@ -9962,6 +10127,221 @@ $root.vtadmin = (function() {
return GetClustersResponse;
})();
+ vtadmin.GetFullStatusRequest = (function() {
+
+ /**
+ * Properties of a GetFullStatusRequest.
+ * @memberof vtadmin
+ * @interface IGetFullStatusRequest
+ * @property {string|null} [cluster_id] GetFullStatusRequest cluster_id
+ * @property {topodata.ITabletAlias|null} [alias] GetFullStatusRequest alias
+ */
+
+ /**
+ * Constructs a new GetFullStatusRequest.
+ * @memberof vtadmin
+ * @classdesc Represents a GetFullStatusRequest.
+ * @implements IGetFullStatusRequest
+ * @constructor
+ * @param {vtadmin.IGetFullStatusRequest=} [properties] Properties to set
+ */
+ function GetFullStatusRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * GetFullStatusRequest cluster_id.
+ * @member {string} cluster_id
+ * @memberof vtadmin.GetFullStatusRequest
+ * @instance
+ */
+ GetFullStatusRequest.prototype.cluster_id = "";
+
+ /**
+ * GetFullStatusRequest alias.
+ * @member {topodata.ITabletAlias|null|undefined} alias
+ * @memberof vtadmin.GetFullStatusRequest
+ * @instance
+ */
+ GetFullStatusRequest.prototype.alias = null;
+
+ /**
+ * Creates a new GetFullStatusRequest instance using the specified properties.
+ * @function create
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {vtadmin.IGetFullStatusRequest=} [properties] Properties to set
+ * @returns {vtadmin.GetFullStatusRequest} GetFullStatusRequest instance
+ */
+ GetFullStatusRequest.create = function create(properties) {
+ return new GetFullStatusRequest(properties);
+ };
+
+ /**
+ * Encodes the specified GetFullStatusRequest message. Does not implicitly {@link vtadmin.GetFullStatusRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {vtadmin.IGetFullStatusRequest} message GetFullStatusRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetFullStatusRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id);
+ if (message.alias != null && Object.hasOwnProperty.call(message, "alias"))
+ $root.topodata.TabletAlias.encode(message.alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
+ return writer;
+ };
+
+ /**
+ * Encodes the specified GetFullStatusRequest message, length delimited. Does not implicitly {@link vtadmin.GetFullStatusRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {vtadmin.IGetFullStatusRequest} message GetFullStatusRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetFullStatusRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a GetFullStatusRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtadmin.GetFullStatusRequest} GetFullStatusRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetFullStatusRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.GetFullStatusRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.cluster_id = reader.string();
+ break;
+ case 2:
+ message.alias = $root.topodata.TabletAlias.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a GetFullStatusRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtadmin.GetFullStatusRequest} GetFullStatusRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetFullStatusRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a GetFullStatusRequest message.
+ * @function verify
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {Object.} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ GetFullStatusRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ if (!$util.isString(message.cluster_id))
+ return "cluster_id: string expected";
+ if (message.alias != null && message.hasOwnProperty("alias")) {
+ var error = $root.topodata.TabletAlias.verify(message.alias);
+ if (error)
+ return "alias." + error;
+ }
+ return null;
+ };
+
+ /**
+ * Creates a GetFullStatusRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {Object.} object Plain object
+ * @returns {vtadmin.GetFullStatusRequest} GetFullStatusRequest
+ */
+ GetFullStatusRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtadmin.GetFullStatusRequest)
+ return object;
+ var message = new $root.vtadmin.GetFullStatusRequest();
+ if (object.cluster_id != null)
+ message.cluster_id = String(object.cluster_id);
+ if (object.alias != null) {
+ if (typeof object.alias !== "object")
+ throw TypeError(".vtadmin.GetFullStatusRequest.alias: object expected");
+ message.alias = $root.topodata.TabletAlias.fromObject(object.alias);
+ }
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a GetFullStatusRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtadmin.GetFullStatusRequest
+ * @static
+ * @param {vtadmin.GetFullStatusRequest} message GetFullStatusRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.} Plain object
+ */
+ GetFullStatusRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults) {
+ object.cluster_id = "";
+ object.alias = null;
+ }
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ object.cluster_id = message.cluster_id;
+ if (message.alias != null && message.hasOwnProperty("alias"))
+ object.alias = $root.topodata.TabletAlias.toObject(message.alias, options);
+ return object;
+ };
+
+ /**
+ * Converts this GetFullStatusRequest to JSON.
+ * @function toJSON
+ * @memberof vtadmin.GetFullStatusRequest
+ * @instance
+ * @returns {Object.} JSON object
+ */
+ GetFullStatusRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return GetFullStatusRequest;
+ })();
+
vtadmin.GetGatesRequest = (function() {
/**
@@ -13692,6 +14072,216 @@ $root.vtadmin = (function() {
return GetTabletsResponse;
})();
+ vtadmin.GetTopologyPathRequest = (function() {
+
+ /**
+ * Properties of a GetTopologyPathRequest.
+ * @memberof vtadmin
+ * @interface IGetTopologyPathRequest
+ * @property {string|null} [cluster_id] GetTopologyPathRequest cluster_id
+ * @property {string|null} [path] GetTopologyPathRequest path
+ */
+
+ /**
+ * Constructs a new GetTopologyPathRequest.
+ * @memberof vtadmin
+ * @classdesc Represents a GetTopologyPathRequest.
+ * @implements IGetTopologyPathRequest
+ * @constructor
+ * @param {vtadmin.IGetTopologyPathRequest=} [properties] Properties to set
+ */
+ function GetTopologyPathRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * GetTopologyPathRequest cluster_id.
+ * @member {string} cluster_id
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @instance
+ */
+ GetTopologyPathRequest.prototype.cluster_id = "";
+
+ /**
+ * GetTopologyPathRequest path.
+ * @member {string} path
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @instance
+ */
+ GetTopologyPathRequest.prototype.path = "";
+
+ /**
+ * Creates a new GetTopologyPathRequest instance using the specified properties.
+ * @function create
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {vtadmin.IGetTopologyPathRequest=} [properties] Properties to set
+ * @returns {vtadmin.GetTopologyPathRequest} GetTopologyPathRequest instance
+ */
+ GetTopologyPathRequest.create = function create(properties) {
+ return new GetTopologyPathRequest(properties);
+ };
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtadmin.GetTopologyPathRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {vtadmin.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetTopologyPathRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id);
+ if (message.path != null && Object.hasOwnProperty.call(message, "path"))
+ writer.uint32(/* id 2, wireType 2 =*/18).string(message.path);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtadmin.GetTopologyPathRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {vtadmin.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetTopologyPathRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtadmin.GetTopologyPathRequest} GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetTopologyPathRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.GetTopologyPathRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.cluster_id = reader.string();
+ break;
+ case 2:
+ message.path = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtadmin.GetTopologyPathRequest} GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetTopologyPathRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a GetTopologyPathRequest message.
+ * @function verify
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {Object.} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ GetTopologyPathRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ if (!$util.isString(message.cluster_id))
+ return "cluster_id: string expected";
+ if (message.path != null && message.hasOwnProperty("path"))
+ if (!$util.isString(message.path))
+ return "path: string expected";
+ return null;
+ };
+
+ /**
+ * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {Object.} object Plain object
+ * @returns {vtadmin.GetTopologyPathRequest} GetTopologyPathRequest
+ */
+ GetTopologyPathRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtadmin.GetTopologyPathRequest)
+ return object;
+ var message = new $root.vtadmin.GetTopologyPathRequest();
+ if (object.cluster_id != null)
+ message.cluster_id = String(object.cluster_id);
+ if (object.path != null)
+ message.path = String(object.path);
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @static
+ * @param {vtadmin.GetTopologyPathRequest} message GetTopologyPathRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.} Plain object
+ */
+ GetTopologyPathRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults) {
+ object.cluster_id = "";
+ object.path = "";
+ }
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ object.cluster_id = message.cluster_id;
+ if (message.path != null && message.hasOwnProperty("path"))
+ object.path = message.path;
+ return object;
+ };
+
+ /**
+ * Converts this GetTopologyPathRequest to JSON.
+ * @function toJSON
+ * @memberof vtadmin.GetTopologyPathRequest
+ * @instance
+ * @returns {Object.} JSON object
+ */
+ GetTopologyPathRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return GetTopologyPathRequest;
+ })();
+
vtadmin.GetVSchemaRequest = (function() {
/**
@@ -23093,6 +23683,216 @@ $root.vtadmin = (function() {
return TabletExternallyReparentedRequest;
})();
+ vtadmin.ValidateRequest = (function() {
+
+ /**
+ * Properties of a ValidateRequest.
+ * @memberof vtadmin
+ * @interface IValidateRequest
+ * @property {string|null} [cluster_id] ValidateRequest cluster_id
+ * @property {boolean|null} [ping_tablets] ValidateRequest ping_tablets
+ */
+
+ /**
+ * Constructs a new ValidateRequest.
+ * @memberof vtadmin
+ * @classdesc Represents a ValidateRequest.
+ * @implements IValidateRequest
+ * @constructor
+ * @param {vtadmin.IValidateRequest=} [properties] Properties to set
+ */
+ function ValidateRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * ValidateRequest cluster_id.
+ * @member {string} cluster_id
+ * @memberof vtadmin.ValidateRequest
+ * @instance
+ */
+ ValidateRequest.prototype.cluster_id = "";
+
+ /**
+ * ValidateRequest ping_tablets.
+ * @member {boolean} ping_tablets
+ * @memberof vtadmin.ValidateRequest
+ * @instance
+ */
+ ValidateRequest.prototype.ping_tablets = false;
+
+ /**
+ * Creates a new ValidateRequest instance using the specified properties.
+ * @function create
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {vtadmin.IValidateRequest=} [properties] Properties to set
+ * @returns {vtadmin.ValidateRequest} ValidateRequest instance
+ */
+ ValidateRequest.create = function create(properties) {
+ return new ValidateRequest(properties);
+ };
+
+ /**
+ * Encodes the specified ValidateRequest message. Does not implicitly {@link vtadmin.ValidateRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {vtadmin.IValidateRequest} message ValidateRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id);
+ if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets"))
+ writer.uint32(/* id 2, wireType 0 =*/16).bool(message.ping_tablets);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified ValidateRequest message, length delimited. Does not implicitly {@link vtadmin.ValidateRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {vtadmin.IValidateRequest} message ValidateRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a ValidateRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtadmin.ValidateRequest} ValidateRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.ValidateRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.cluster_id = reader.string();
+ break;
+ case 2:
+ message.ping_tablets = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a ValidateRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtadmin.ValidateRequest} ValidateRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a ValidateRequest message.
+ * @function verify
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {Object.} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ ValidateRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ if (!$util.isString(message.cluster_id))
+ return "cluster_id: string expected";
+ if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets"))
+ if (typeof message.ping_tablets !== "boolean")
+ return "ping_tablets: boolean expected";
+ return null;
+ };
+
+ /**
+ * Creates a ValidateRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {Object.} object Plain object
+ * @returns {vtadmin.ValidateRequest} ValidateRequest
+ */
+ ValidateRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtadmin.ValidateRequest)
+ return object;
+ var message = new $root.vtadmin.ValidateRequest();
+ if (object.cluster_id != null)
+ message.cluster_id = String(object.cluster_id);
+ if (object.ping_tablets != null)
+ message.ping_tablets = Boolean(object.ping_tablets);
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a ValidateRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtadmin.ValidateRequest
+ * @static
+ * @param {vtadmin.ValidateRequest} message ValidateRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.} Plain object
+ */
+ ValidateRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults) {
+ object.cluster_id = "";
+ object.ping_tablets = false;
+ }
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ object.cluster_id = message.cluster_id;
+ if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets"))
+ object.ping_tablets = message.ping_tablets;
+ return object;
+ };
+
+ /**
+ * Converts this ValidateRequest to JSON.
+ * @function toJSON
+ * @memberof vtadmin.ValidateRequest
+ * @instance
+ * @returns {Object.} JSON object
+ */
+ ValidateRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return ValidateRequest;
+ })();
+
vtadmin.ValidateKeyspaceRequest = (function() {
/**
@@ -23535,6 +24335,260 @@ $root.vtadmin = (function() {
return ValidateSchemaKeyspaceRequest;
})();
+ vtadmin.ValidateShardRequest = (function() {
+
+ /**
+ * Properties of a ValidateShardRequest.
+ * @memberof vtadmin
+ * @interface IValidateShardRequest
+ * @property {string|null} [cluster_id] ValidateShardRequest cluster_id
+ * @property {string|null} [keyspace] ValidateShardRequest keyspace
+ * @property {string|null} [shard] ValidateShardRequest shard
+ * @property {boolean|null} [ping_tablets] ValidateShardRequest ping_tablets
+ */
+
+ /**
+ * Constructs a new ValidateShardRequest.
+ * @memberof vtadmin
+ * @classdesc Represents a ValidateShardRequest.
+ * @implements IValidateShardRequest
+ * @constructor
+ * @param {vtadmin.IValidateShardRequest=} [properties] Properties to set
+ */
+ function ValidateShardRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * ValidateShardRequest cluster_id.
+ * @member {string} cluster_id
+ * @memberof vtadmin.ValidateShardRequest
+ * @instance
+ */
+ ValidateShardRequest.prototype.cluster_id = "";
+
+ /**
+ * ValidateShardRequest keyspace.
+ * @member {string} keyspace
+ * @memberof vtadmin.ValidateShardRequest
+ * @instance
+ */
+ ValidateShardRequest.prototype.keyspace = "";
+
+ /**
+ * ValidateShardRequest shard.
+ * @member {string} shard
+ * @memberof vtadmin.ValidateShardRequest
+ * @instance
+ */
+ ValidateShardRequest.prototype.shard = "";
+
+ /**
+ * ValidateShardRequest ping_tablets.
+ * @member {boolean} ping_tablets
+ * @memberof vtadmin.ValidateShardRequest
+ * @instance
+ */
+ ValidateShardRequest.prototype.ping_tablets = false;
+
+ /**
+ * Creates a new ValidateShardRequest instance using the specified properties.
+ * @function create
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {vtadmin.IValidateShardRequest=} [properties] Properties to set
+ * @returns {vtadmin.ValidateShardRequest} ValidateShardRequest instance
+ */
+ ValidateShardRequest.create = function create(properties) {
+ return new ValidateShardRequest(properties);
+ };
+
+ /**
+ * Encodes the specified ValidateShardRequest message. Does not implicitly {@link vtadmin.ValidateShardRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {vtadmin.IValidateShardRequest} message ValidateShardRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateShardRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id);
+ if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace"))
+ writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace);
+ if (message.shard != null && Object.hasOwnProperty.call(message, "shard"))
+ writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard);
+ if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets"))
+ writer.uint32(/* id 4, wireType 0 =*/32).bool(message.ping_tablets);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified ValidateShardRequest message, length delimited. Does not implicitly {@link vtadmin.ValidateShardRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {vtadmin.IValidateShardRequest} message ValidateShardRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateShardRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a ValidateShardRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtadmin.ValidateShardRequest} ValidateShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateShardRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.ValidateShardRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.cluster_id = reader.string();
+ break;
+ case 2:
+ message.keyspace = reader.string();
+ break;
+ case 3:
+ message.shard = reader.string();
+ break;
+ case 4:
+ message.ping_tablets = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a ValidateShardRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtadmin.ValidateShardRequest} ValidateShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateShardRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a ValidateShardRequest message.
+ * @function verify
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {Object.} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ ValidateShardRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ if (!$util.isString(message.cluster_id))
+ return "cluster_id: string expected";
+ if (message.keyspace != null && message.hasOwnProperty("keyspace"))
+ if (!$util.isString(message.keyspace))
+ return "keyspace: string expected";
+ if (message.shard != null && message.hasOwnProperty("shard"))
+ if (!$util.isString(message.shard))
+ return "shard: string expected";
+ if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets"))
+ if (typeof message.ping_tablets !== "boolean")
+ return "ping_tablets: boolean expected";
+ return null;
+ };
+
+ /**
+ * Creates a ValidateShardRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {Object.} object Plain object
+ * @returns {vtadmin.ValidateShardRequest} ValidateShardRequest
+ */
+ ValidateShardRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtadmin.ValidateShardRequest)
+ return object;
+ var message = new $root.vtadmin.ValidateShardRequest();
+ if (object.cluster_id != null)
+ message.cluster_id = String(object.cluster_id);
+ if (object.keyspace != null)
+ message.keyspace = String(object.keyspace);
+ if (object.shard != null)
+ message.shard = String(object.shard);
+ if (object.ping_tablets != null)
+ message.ping_tablets = Boolean(object.ping_tablets);
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a ValidateShardRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtadmin.ValidateShardRequest
+ * @static
+ * @param {vtadmin.ValidateShardRequest} message ValidateShardRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.} Plain object
+ */
+ ValidateShardRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults) {
+ object.cluster_id = "";
+ object.keyspace = "";
+ object.shard = "";
+ object.ping_tablets = false;
+ }
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ object.cluster_id = message.cluster_id;
+ if (message.keyspace != null && message.hasOwnProperty("keyspace"))
+ object.keyspace = message.keyspace;
+ if (message.shard != null && message.hasOwnProperty("shard"))
+ object.shard = message.shard;
+ if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets"))
+ object.ping_tablets = message.ping_tablets;
+ return object;
+ };
+
+ /**
+ * Converts this ValidateShardRequest to JSON.
+ * @function toJSON
+ * @memberof vtadmin.ValidateShardRequest
+ * @instance
+ * @returns {Object.} JSON object
+ */
+ ValidateShardRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return ValidateShardRequest;
+ })();
+
vtadmin.ValidateVersionKeyspaceRequest = (function() {
/**
@@ -23745,6 +24799,238 @@ $root.vtadmin = (function() {
return ValidateVersionKeyspaceRequest;
})();
+ vtadmin.ValidateVersionShardRequest = (function() {
+
+ /**
+ * Properties of a ValidateVersionShardRequest.
+ * @memberof vtadmin
+ * @interface IValidateVersionShardRequest
+ * @property {string|null} [cluster_id] ValidateVersionShardRequest cluster_id
+ * @property {string|null} [keyspace] ValidateVersionShardRequest keyspace
+ * @property {string|null} [shard] ValidateVersionShardRequest shard
+ */
+
+ /**
+ * Constructs a new ValidateVersionShardRequest.
+ * @memberof vtadmin
+ * @classdesc Represents a ValidateVersionShardRequest.
+ * @implements IValidateVersionShardRequest
+ * @constructor
+ * @param {vtadmin.IValidateVersionShardRequest=} [properties] Properties to set
+ */
+ function ValidateVersionShardRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * ValidateVersionShardRequest cluster_id.
+ * @member {string} cluster_id
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @instance
+ */
+ ValidateVersionShardRequest.prototype.cluster_id = "";
+
+ /**
+ * ValidateVersionShardRequest keyspace.
+ * @member {string} keyspace
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @instance
+ */
+ ValidateVersionShardRequest.prototype.keyspace = "";
+
+ /**
+ * ValidateVersionShardRequest shard.
+ * @member {string} shard
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @instance
+ */
+ ValidateVersionShardRequest.prototype.shard = "";
+
+ /**
+ * Creates a new ValidateVersionShardRequest instance using the specified properties.
+ * @function create
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {vtadmin.IValidateVersionShardRequest=} [properties] Properties to set
+ * @returns {vtadmin.ValidateVersionShardRequest} ValidateVersionShardRequest instance
+ */
+ ValidateVersionShardRequest.create = function create(properties) {
+ return new ValidateVersionShardRequest(properties);
+ };
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtadmin.ValidateVersionShardRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {vtadmin.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateVersionShardRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id);
+ if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace"))
+ writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace);
+ if (message.shard != null && Object.hasOwnProperty.call(message, "shard"))
+ writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtadmin.ValidateVersionShardRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {vtadmin.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateVersionShardRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtadmin.ValidateVersionShardRequest} ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateVersionShardRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.ValidateVersionShardRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.cluster_id = reader.string();
+ break;
+ case 2:
+ message.keyspace = reader.string();
+ break;
+ case 3:
+ message.shard = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtadmin.ValidateVersionShardRequest} ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateVersionShardRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a ValidateVersionShardRequest message.
+ * @function verify
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {Object.} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ ValidateVersionShardRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ if (!$util.isString(message.cluster_id))
+ return "cluster_id: string expected";
+ if (message.keyspace != null && message.hasOwnProperty("keyspace"))
+ if (!$util.isString(message.keyspace))
+ return "keyspace: string expected";
+ if (message.shard != null && message.hasOwnProperty("shard"))
+ if (!$util.isString(message.shard))
+ return "shard: string expected";
+ return null;
+ };
+
+ /**
+ * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {Object.} object Plain object
+ * @returns {vtadmin.ValidateVersionShardRequest} ValidateVersionShardRequest
+ */
+ ValidateVersionShardRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtadmin.ValidateVersionShardRequest)
+ return object;
+ var message = new $root.vtadmin.ValidateVersionShardRequest();
+ if (object.cluster_id != null)
+ message.cluster_id = String(object.cluster_id);
+ if (object.keyspace != null)
+ message.keyspace = String(object.keyspace);
+ if (object.shard != null)
+ message.shard = String(object.shard);
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @static
+ * @param {vtadmin.ValidateVersionShardRequest} message ValidateVersionShardRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.} Plain object
+ */
+ ValidateVersionShardRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults) {
+ object.cluster_id = "";
+ object.keyspace = "";
+ object.shard = "";
+ }
+ if (message.cluster_id != null && message.hasOwnProperty("cluster_id"))
+ object.cluster_id = message.cluster_id;
+ if (message.keyspace != null && message.hasOwnProperty("keyspace"))
+ object.keyspace = message.keyspace;
+ if (message.shard != null && message.hasOwnProperty("shard"))
+ object.shard = message.shard;
+ return object;
+ };
+
+ /**
+ * Converts this ValidateVersionShardRequest to JSON.
+ * @function toJSON
+ * @memberof vtadmin.ValidateVersionShardRequest
+ * @instance
+ * @returns {Object.} JSON object
+ */
+ ValidateVersionShardRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return ValidateVersionShardRequest;
+ })();
+
vtadmin.VTExplainRequest = (function() {
/**
@@ -51156,6 +52442,7 @@ $root.tabletmanagerdata = (function() {
* @interface IBackupRequest
* @property {number|Long|null} [concurrency] BackupRequest concurrency
* @property {boolean|null} [allow_primary] BackupRequest allow_primary
+ * @property {string|null} [backup_engine] BackupRequest backup_engine
*/
/**
@@ -51189,6 +52476,28 @@ $root.tabletmanagerdata = (function() {
*/
BackupRequest.prototype.allow_primary = false;
+ /**
+ * BackupRequest backup_engine.
+ * @member {string|null|undefined} backup_engine
+ * @memberof tabletmanagerdata.BackupRequest
+ * @instance
+ */
+ BackupRequest.prototype.backup_engine = null;
+
+ // OneOf field names bound to virtual getters and setters
+ var $oneOfFields;
+
+ /**
+ * BackupRequest _backup_engine.
+ * @member {"backup_engine"|undefined} _backup_engine
+ * @memberof tabletmanagerdata.BackupRequest
+ * @instance
+ */
+ Object.defineProperty(BackupRequest.prototype, "_backup_engine", {
+ get: $util.oneOfGetter($oneOfFields = ["backup_engine"]),
+ set: $util.oneOfSetter($oneOfFields)
+ });
+
/**
* Creates a new BackupRequest instance using the specified properties.
* @function create
@@ -51217,6 +52526,8 @@ $root.tabletmanagerdata = (function() {
writer.uint32(/* id 1, wireType 0 =*/8).int64(message.concurrency);
if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary"))
writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary);
+ if (message.backup_engine != null && Object.hasOwnProperty.call(message, "backup_engine"))
+ writer.uint32(/* id 5, wireType 2 =*/42).string(message.backup_engine);
return writer;
};
@@ -51257,6 +52568,9 @@ $root.tabletmanagerdata = (function() {
case 2:
message.allow_primary = reader.bool();
break;
+ case 5:
+ message.backup_engine = reader.string();
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -51292,12 +52606,18 @@ $root.tabletmanagerdata = (function() {
BackupRequest.verify = function verify(message) {
if (typeof message !== "object" || message === null)
return "object expected";
+ var properties = {};
if (message.concurrency != null && message.hasOwnProperty("concurrency"))
if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high)))
return "concurrency: integer|Long expected";
if (message.allow_primary != null && message.hasOwnProperty("allow_primary"))
if (typeof message.allow_primary !== "boolean")
return "allow_primary: boolean expected";
+ if (message.backup_engine != null && message.hasOwnProperty("backup_engine")) {
+ properties._backup_engine = 1;
+ if (!$util.isString(message.backup_engine))
+ return "backup_engine: string expected";
+ }
return null;
};
@@ -51324,6 +52644,8 @@ $root.tabletmanagerdata = (function() {
message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber();
if (object.allow_primary != null)
message.allow_primary = Boolean(object.allow_primary);
+ if (object.backup_engine != null)
+ message.backup_engine = String(object.backup_engine);
return message;
};
@@ -51355,6 +52677,11 @@ $root.tabletmanagerdata = (function() {
object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber() : message.concurrency;
if (message.allow_primary != null && message.hasOwnProperty("allow_primary"))
object.allow_primary = message.allow_primary;
+ if (message.backup_engine != null && message.hasOwnProperty("backup_engine")) {
+ object.backup_engine = message.backup_engine;
+ if (options.oneofs)
+ object._backup_engine = "backup_engine";
+ }
return object;
};
@@ -51571,6 +52898,7 @@ $root.tabletmanagerdata = (function() {
* @memberof tabletmanagerdata
* @interface IRestoreFromBackupRequest
* @property {vttime.ITime|null} [backup_time] RestoreFromBackupRequest backup_time
+ * @property {Array.|null} [allowed_backup_engines] RestoreFromBackupRequest allowed_backup_engines
*/
/**
@@ -51582,6 +52910,7 @@ $root.tabletmanagerdata = (function() {
* @param {tabletmanagerdata.IRestoreFromBackupRequest=} [properties] Properties to set
*/
function RestoreFromBackupRequest(properties) {
+ this.allowed_backup_engines = [];
if (properties)
for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
if (properties[keys[i]] != null)
@@ -51596,6 +52925,14 @@ $root.tabletmanagerdata = (function() {
*/
RestoreFromBackupRequest.prototype.backup_time = null;
+ /**
+ * RestoreFromBackupRequest allowed_backup_engines.
+ * @member {Array.} allowed_backup_engines
+ * @memberof tabletmanagerdata.RestoreFromBackupRequest
+ * @instance
+ */
+ RestoreFromBackupRequest.prototype.allowed_backup_engines = $util.emptyArray;
+
/**
* Creates a new RestoreFromBackupRequest instance using the specified properties.
* @function create
@@ -51622,6 +52959,9 @@ $root.tabletmanagerdata = (function() {
writer = $Writer.create();
if (message.backup_time != null && Object.hasOwnProperty.call(message, "backup_time"))
$root.vttime.Time.encode(message.backup_time, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim();
+ if (message.allowed_backup_engines != null && message.allowed_backup_engines.length)
+ for (var i = 0; i < message.allowed_backup_engines.length; ++i)
+ writer.uint32(/* id 5, wireType 2 =*/42).string(message.allowed_backup_engines[i]);
return writer;
};
@@ -51659,6 +52999,11 @@ $root.tabletmanagerdata = (function() {
case 1:
message.backup_time = $root.vttime.Time.decode(reader, reader.uint32());
break;
+ case 5:
+ if (!(message.allowed_backup_engines && message.allowed_backup_engines.length))
+ message.allowed_backup_engines = [];
+ message.allowed_backup_engines.push(reader.string());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -51699,6 +53044,13 @@ $root.tabletmanagerdata = (function() {
if (error)
return "backup_time." + error;
}
+ if (message.allowed_backup_engines != null && message.hasOwnProperty("allowed_backup_engines")) {
+ if (!Array.isArray(message.allowed_backup_engines))
+ return "allowed_backup_engines: array expected";
+ for (var i = 0; i < message.allowed_backup_engines.length; ++i)
+ if (!$util.isString(message.allowed_backup_engines[i]))
+ return "allowed_backup_engines: string[] expected";
+ }
return null;
};
@@ -51719,6 +53071,13 @@ $root.tabletmanagerdata = (function() {
throw TypeError(".tabletmanagerdata.RestoreFromBackupRequest.backup_time: object expected");
message.backup_time = $root.vttime.Time.fromObject(object.backup_time);
}
+ if (object.allowed_backup_engines) {
+ if (!Array.isArray(object.allowed_backup_engines))
+ throw TypeError(".tabletmanagerdata.RestoreFromBackupRequest.allowed_backup_engines: array expected");
+ message.allowed_backup_engines = [];
+ for (var i = 0; i < object.allowed_backup_engines.length; ++i)
+ message.allowed_backup_engines[i] = String(object.allowed_backup_engines[i]);
+ }
return message;
};
@@ -51735,10 +53094,17 @@ $root.tabletmanagerdata = (function() {
if (!options)
options = {};
var object = {};
+ if (options.arrays || options.defaults)
+ object.allowed_backup_engines = [];
if (options.defaults)
object.backup_time = null;
if (message.backup_time != null && message.hasOwnProperty("backup_time"))
object.backup_time = $root.vttime.Time.toObject(message.backup_time, options);
+ if (message.allowed_backup_engines && message.allowed_backup_engines.length) {
+ object.allowed_backup_engines = [];
+ for (var j = 0; j < message.allowed_backup_engines.length; ++j)
+ object.allowed_backup_engines[j] = message.allowed_backup_engines[j];
+ }
return object;
};
@@ -56067,6 +57433,8 @@ $root.query = (function() {
* @property {boolean|null} [skip_query_plan_cache] ExecuteOptions skip_query_plan_cache
* @property {query.ExecuteOptions.PlannerVersion|null} [planner_version] ExecuteOptions planner_version
* @property {boolean|null} [has_created_temp_tables] ExecuteOptions has_created_temp_tables
+ * @property {string|null} [WorkloadName] ExecuteOptions WorkloadName
+ * @property {string|null} [priority] ExecuteOptions priority
*/
/**
@@ -56148,6 +57516,22 @@ $root.query = (function() {
*/
ExecuteOptions.prototype.has_created_temp_tables = false;
+ /**
+ * ExecuteOptions WorkloadName.
+ * @member {string} WorkloadName
+ * @memberof query.ExecuteOptions
+ * @instance
+ */
+ ExecuteOptions.prototype.WorkloadName = "";
+
+ /**
+ * ExecuteOptions priority.
+ * @member {string} priority
+ * @memberof query.ExecuteOptions
+ * @instance
+ */
+ ExecuteOptions.prototype.priority = "";
+
/**
* Creates a new ExecuteOptions instance using the specified properties.
* @function create
@@ -56188,6 +57572,10 @@ $root.query = (function() {
writer.uint32(/* id 11, wireType 0 =*/88).int32(message.planner_version);
if (message.has_created_temp_tables != null && Object.hasOwnProperty.call(message, "has_created_temp_tables"))
writer.uint32(/* id 12, wireType 0 =*/96).bool(message.has_created_temp_tables);
+ if (message.WorkloadName != null && Object.hasOwnProperty.call(message, "WorkloadName"))
+ writer.uint32(/* id 15, wireType 2 =*/122).string(message.WorkloadName);
+ if (message.priority != null && Object.hasOwnProperty.call(message, "priority"))
+ writer.uint32(/* id 16, wireType 2 =*/130).string(message.priority);
return writer;
};
@@ -56246,6 +57634,12 @@ $root.query = (function() {
case 12:
message.has_created_temp_tables = reader.bool();
break;
+ case 15:
+ message.WorkloadName = reader.string();
+ break;
+ case 16:
+ message.priority = reader.string();
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -56338,6 +57732,12 @@ $root.query = (function() {
if (message.has_created_temp_tables != null && message.hasOwnProperty("has_created_temp_tables"))
if (typeof message.has_created_temp_tables !== "boolean")
return "has_created_temp_tables: boolean expected";
+ if (message.WorkloadName != null && message.hasOwnProperty("WorkloadName"))
+ if (!$util.isString(message.WorkloadName))
+ return "WorkloadName: string expected";
+ if (message.priority != null && message.hasOwnProperty("priority"))
+ if (!$util.isString(message.priority))
+ return "priority: string expected";
return null;
};
@@ -56460,6 +57860,10 @@ $root.query = (function() {
}
if (object.has_created_temp_tables != null)
message.has_created_temp_tables = Boolean(object.has_created_temp_tables);
+ if (object.WorkloadName != null)
+ message.WorkloadName = String(object.WorkloadName);
+ if (object.priority != null)
+ message.priority = String(object.priority);
return message;
};
@@ -56489,6 +57893,8 @@ $root.query = (function() {
object.skip_query_plan_cache = false;
object.planner_version = options.enums === String ? "DEFAULT_PLANNER" : 0;
object.has_created_temp_tables = false;
+ object.WorkloadName = "";
+ object.priority = "";
}
if (message.included_fields != null && message.hasOwnProperty("included_fields"))
object.included_fields = options.enums === String ? $root.query.ExecuteOptions.IncludedFields[message.included_fields] : message.included_fields;
@@ -56509,6 +57915,10 @@ $root.query = (function() {
object.planner_version = options.enums === String ? $root.query.ExecuteOptions.PlannerVersion[message.planner_version] : message.planner_version;
if (message.has_created_temp_tables != null && message.hasOwnProperty("has_created_temp_tables"))
object.has_created_temp_tables = message.has_created_temp_tables;
+ if (message.WorkloadName != null && message.hasOwnProperty("WorkloadName"))
+ object.WorkloadName = message.WorkloadName;
+ if (message.priority != null && message.hasOwnProperty("priority"))
+ object.priority = message.priority;
return object;
};
@@ -72745,6 +74155,8 @@ $root.replicationdata = (function() {
* @memberof replicationdata
* @interface IStatus
* @property {string|null} [position] Status position
+ * @property {boolean|null} [io_thread_running] Status io_thread_running
+ * @property {boolean|null} [sql_thread_running] Status sql_thread_running
* @property {number|null} [replication_lag_seconds] Status replication_lag_seconds
* @property {string|null} [source_host] Status source_host
* @property {number|null} [source_port] Status source_port
@@ -72766,6 +74178,7 @@ $root.replicationdata = (function() {
* @property {boolean|null} [has_replication_filters] Status has_replication_filters
* @property {boolean|null} [ssl_allowed] Status ssl_allowed
* @property {boolean|null} [replication_lag_unknown] Status replication_lag_unknown
+ * @property {boolean|null} [backup_running] Status backup_running
*/
/**
@@ -72791,6 +74204,22 @@ $root.replicationdata = (function() {
*/
Status.prototype.position = "";
+ /**
+ * Status io_thread_running.
+ * @member {boolean} io_thread_running
+ * @memberof replicationdata.Status
+ * @instance
+ */
+ Status.prototype.io_thread_running = false;
+
+ /**
+ * Status sql_thread_running.
+ * @member {boolean} sql_thread_running
+ * @memberof replicationdata.Status
+ * @instance
+ */
+ Status.prototype.sql_thread_running = false;
+
/**
* Status replication_lag_seconds.
* @member {number} replication_lag_seconds
@@ -72959,6 +74388,14 @@ $root.replicationdata = (function() {
*/
Status.prototype.replication_lag_unknown = false;
+ /**
+ * Status backup_running.
+ * @member {boolean} backup_running
+ * @memberof replicationdata.Status
+ * @instance
+ */
+ Status.prototype.backup_running = false;
+
/**
* Creates a new Status instance using the specified properties.
* @function create
@@ -72985,6 +74422,10 @@ $root.replicationdata = (function() {
writer = $Writer.create();
if (message.position != null && Object.hasOwnProperty.call(message, "position"))
writer.uint32(/* id 1, wireType 2 =*/10).string(message.position);
+ if (message.io_thread_running != null && Object.hasOwnProperty.call(message, "io_thread_running"))
+ writer.uint32(/* id 2, wireType 0 =*/16).bool(message.io_thread_running);
+ if (message.sql_thread_running != null && Object.hasOwnProperty.call(message, "sql_thread_running"))
+ writer.uint32(/* id 3, wireType 0 =*/24).bool(message.sql_thread_running);
if (message.replication_lag_seconds != null && Object.hasOwnProperty.call(message, "replication_lag_seconds"))
writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.replication_lag_seconds);
if (message.source_host != null && Object.hasOwnProperty.call(message, "source_host"))
@@ -73027,6 +74468,8 @@ $root.replicationdata = (function() {
writer.uint32(/* id 23, wireType 0 =*/184).bool(message.ssl_allowed);
if (message.replication_lag_unknown != null && Object.hasOwnProperty.call(message, "replication_lag_unknown"))
writer.uint32(/* id 24, wireType 0 =*/192).bool(message.replication_lag_unknown);
+ if (message.backup_running != null && Object.hasOwnProperty.call(message, "backup_running"))
+ writer.uint32(/* id 25, wireType 0 =*/200).bool(message.backup_running);
return writer;
};
@@ -73064,6 +74507,12 @@ $root.replicationdata = (function() {
case 1:
message.position = reader.string();
break;
+ case 2:
+ message.io_thread_running = reader.bool();
+ break;
+ case 3:
+ message.sql_thread_running = reader.bool();
+ break;
case 4:
message.replication_lag_seconds = reader.uint32();
break;
@@ -73127,6 +74576,9 @@ $root.replicationdata = (function() {
case 24:
message.replication_lag_unknown = reader.bool();
break;
+ case 25:
+ message.backup_running = reader.bool();
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -73165,6 +74617,12 @@ $root.replicationdata = (function() {
if (message.position != null && message.hasOwnProperty("position"))
if (!$util.isString(message.position))
return "position: string expected";
+ if (message.io_thread_running != null && message.hasOwnProperty("io_thread_running"))
+ if (typeof message.io_thread_running !== "boolean")
+ return "io_thread_running: boolean expected";
+ if (message.sql_thread_running != null && message.hasOwnProperty("sql_thread_running"))
+ if (typeof message.sql_thread_running !== "boolean")
+ return "sql_thread_running: boolean expected";
if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds"))
if (!$util.isInteger(message.replication_lag_seconds))
return "replication_lag_seconds: integer expected";
@@ -73228,6 +74686,9 @@ $root.replicationdata = (function() {
if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown"))
if (typeof message.replication_lag_unknown !== "boolean")
return "replication_lag_unknown: boolean expected";
+ if (message.backup_running != null && message.hasOwnProperty("backup_running"))
+ if (typeof message.backup_running !== "boolean")
+ return "backup_running: boolean expected";
return null;
};
@@ -73245,6 +74706,10 @@ $root.replicationdata = (function() {
var message = new $root.replicationdata.Status();
if (object.position != null)
message.position = String(object.position);
+ if (object.io_thread_running != null)
+ message.io_thread_running = Boolean(object.io_thread_running);
+ if (object.sql_thread_running != null)
+ message.sql_thread_running = Boolean(object.sql_thread_running);
if (object.replication_lag_seconds != null)
message.replication_lag_seconds = object.replication_lag_seconds >>> 0;
if (object.source_host != null)
@@ -73287,6 +74752,8 @@ $root.replicationdata = (function() {
message.ssl_allowed = Boolean(object.ssl_allowed);
if (object.replication_lag_unknown != null)
message.replication_lag_unknown = Boolean(object.replication_lag_unknown);
+ if (object.backup_running != null)
+ message.backup_running = Boolean(object.backup_running);
return message;
};
@@ -73305,6 +74772,8 @@ $root.replicationdata = (function() {
var object = {};
if (options.defaults) {
object.position = "";
+ object.io_thread_running = false;
+ object.sql_thread_running = false;
object.replication_lag_seconds = 0;
object.source_host = "";
object.source_port = 0;
@@ -73326,9 +74795,14 @@ $root.replicationdata = (function() {
object.has_replication_filters = false;
object.ssl_allowed = false;
object.replication_lag_unknown = false;
+ object.backup_running = false;
}
if (message.position != null && message.hasOwnProperty("position"))
object.position = message.position;
+ if (message.io_thread_running != null && message.hasOwnProperty("io_thread_running"))
+ object.io_thread_running = message.io_thread_running;
+ if (message.sql_thread_running != null && message.hasOwnProperty("sql_thread_running"))
+ object.sql_thread_running = message.sql_thread_running;
if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds"))
object.replication_lag_seconds = message.replication_lag_seconds;
if (message.source_host != null && message.hasOwnProperty("source_host"))
@@ -73371,6 +74845,8 @@ $root.replicationdata = (function() {
object.ssl_allowed = message.ssl_allowed;
if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown"))
object.replication_lag_unknown = message.replication_lag_unknown;
+ if (message.backup_running != null && message.hasOwnProperty("backup_running"))
+ object.backup_running = message.backup_running;
return object;
};
@@ -83824,6 +85300,7 @@ $root.vtctldata = (function() {
* @property {topodata.ITabletAlias|null} [tablet_alias] BackupRequest tablet_alias
* @property {boolean|null} [allow_primary] BackupRequest allow_primary
* @property {number|Long|null} [concurrency] BackupRequest concurrency
+ * @property {string|null} [backup_engine] BackupRequest backup_engine
*/
/**
@@ -83865,6 +85342,28 @@ $root.vtctldata = (function() {
*/
BackupRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0;
+ /**
+ * BackupRequest backup_engine.
+ * @member {string|null|undefined} backup_engine
+ * @memberof vtctldata.BackupRequest
+ * @instance
+ */
+ BackupRequest.prototype.backup_engine = null;
+
+ // OneOf field names bound to virtual getters and setters
+ var $oneOfFields;
+
+ /**
+ * BackupRequest _backup_engine.
+ * @member {"backup_engine"|undefined} _backup_engine
+ * @memberof vtctldata.BackupRequest
+ * @instance
+ */
+ Object.defineProperty(BackupRequest.prototype, "_backup_engine", {
+ get: $util.oneOfGetter($oneOfFields = ["backup_engine"]),
+ set: $util.oneOfSetter($oneOfFields)
+ });
+
/**
* Creates a new BackupRequest instance using the specified properties.
* @function create
@@ -83895,6 +85394,8 @@ $root.vtctldata = (function() {
writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary);
if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency"))
writer.uint32(/* id 3, wireType 0 =*/24).uint64(message.concurrency);
+ if (message.backup_engine != null && Object.hasOwnProperty.call(message, "backup_engine"))
+ writer.uint32(/* id 6, wireType 2 =*/50).string(message.backup_engine);
return writer;
};
@@ -83938,6 +85439,9 @@ $root.vtctldata = (function() {
case 3:
message.concurrency = reader.uint64();
break;
+ case 6:
+ message.backup_engine = reader.string();
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -83973,6 +85477,7 @@ $root.vtctldata = (function() {
BackupRequest.verify = function verify(message) {
if (typeof message !== "object" || message === null)
return "object expected";
+ var properties = {};
if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) {
var error = $root.topodata.TabletAlias.verify(message.tablet_alias);
if (error)
@@ -83984,6 +85489,11 @@ $root.vtctldata = (function() {
if (message.concurrency != null && message.hasOwnProperty("concurrency"))
if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high)))
return "concurrency: integer|Long expected";
+ if (message.backup_engine != null && message.hasOwnProperty("backup_engine")) {
+ properties._backup_engine = 1;
+ if (!$util.isString(message.backup_engine))
+ return "backup_engine: string expected";
+ }
return null;
};
@@ -84015,6 +85525,8 @@ $root.vtctldata = (function() {
message.concurrency = object.concurrency;
else if (typeof object.concurrency === "object")
message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true);
+ if (object.backup_engine != null)
+ message.backup_engine = String(object.backup_engine);
return message;
};
@@ -84049,6 +85561,11 @@ $root.vtctldata = (function() {
object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency;
else
object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency;
+ if (message.backup_engine != null && message.hasOwnProperty("backup_engine")) {
+ object.backup_engine = message.backup_engine;
+ if (options.oneofs)
+ object._backup_engine = "backup_engine";
+ }
return object;
};
@@ -88500,6 +90017,7 @@ $root.vtctldata = (function() {
* @property {Array.<topodata.ITabletAlias>|null} [ignore_replicas] EmergencyReparentShardRequest ignore_replicas
* @property {vttime.IDuration|null} [wait_replicas_timeout] EmergencyReparentShardRequest wait_replicas_timeout
* @property {boolean|null} [prevent_cross_cell_promotion] EmergencyReparentShardRequest prevent_cross_cell_promotion
+ * @property {topodata.ITabletAlias|null} [expected_primary] EmergencyReparentShardRequest expected_primary
*/
/**
@@ -88566,6 +90084,14 @@ $root.vtctldata = (function() {
*/
EmergencyReparentShardRequest.prototype.prevent_cross_cell_promotion = false;
+ /**
+ * EmergencyReparentShardRequest expected_primary.
+ * @member {topodata.ITabletAlias|null|undefined} expected_primary
+ * @memberof vtctldata.EmergencyReparentShardRequest
+ * @instance
+ */
+ EmergencyReparentShardRequest.prototype.expected_primary = null;
+
/**
* Creates a new EmergencyReparentShardRequest instance using the specified properties.
* @function create
@@ -88603,6 +90129,8 @@ $root.vtctldata = (function() {
$root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim();
if (message.prevent_cross_cell_promotion != null && Object.hasOwnProperty.call(message, "prevent_cross_cell_promotion"))
writer.uint32(/* id 6, wireType 0 =*/48).bool(message.prevent_cross_cell_promotion);
+ if (message.expected_primary != null && Object.hasOwnProperty.call(message, "expected_primary"))
+ $root.topodata.TabletAlias.encode(message.expected_primary, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim();
return writer;
};
@@ -88657,6 +90185,9 @@ $root.vtctldata = (function() {
case 6:
message.prevent_cross_cell_promotion = reader.bool();
break;
+ case 8:
+ message.expected_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -88720,6 +90251,11 @@ $root.vtctldata = (function() {
if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion"))
if (typeof message.prevent_cross_cell_promotion !== "boolean")
return "prevent_cross_cell_promotion: boolean expected";
+ if (message.expected_primary != null && message.hasOwnProperty("expected_primary")) {
+ var error = $root.topodata.TabletAlias.verify(message.expected_primary);
+ if (error)
+ return "expected_primary." + error;
+ }
return null;
};
@@ -88761,6 +90297,11 @@ $root.vtctldata = (function() {
}
if (object.prevent_cross_cell_promotion != null)
message.prevent_cross_cell_promotion = Boolean(object.prevent_cross_cell_promotion);
+ if (object.expected_primary != null) {
+ if (typeof object.expected_primary !== "object")
+ throw TypeError(".vtctldata.EmergencyReparentShardRequest.expected_primary: object expected");
+ message.expected_primary = $root.topodata.TabletAlias.fromObject(object.expected_primary);
+ }
return message;
};
@@ -88785,6 +90326,7 @@ $root.vtctldata = (function() {
object.new_primary = null;
object.wait_replicas_timeout = null;
object.prevent_cross_cell_promotion = false;
+ object.expected_primary = null;
}
if (message.keyspace != null && message.hasOwnProperty("keyspace"))
object.keyspace = message.keyspace;
@@ -88801,6 +90343,8 @@ $root.vtctldata = (function() {
object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options);
if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion"))
object.prevent_cross_cell_promotion = message.prevent_cross_cell_promotion;
+ if (message.expected_primary != null && message.hasOwnProperty("expected_primary"))
+ object.expected_primary = $root.topodata.TabletAlias.toObject(message.expected_primary, options);
return object;
};
@@ -98554,6 +100098,656 @@ $root.vtctldata = (function() {
return GetTabletsResponse;
})();
+ vtctldata.GetTopologyPathRequest = (function() {
+
+ /**
+ * Properties of a GetTopologyPathRequest.
+ * @memberof vtctldata
+ * @interface IGetTopologyPathRequest
+ * @property {string|null} [path] GetTopologyPathRequest path
+ */
+
+ /**
+ * Constructs a new GetTopologyPathRequest.
+ * @memberof vtctldata
+ * @classdesc Represents a GetTopologyPathRequest.
+ * @implements IGetTopologyPathRequest
+ * @constructor
+ * @param {vtctldata.IGetTopologyPathRequest=} [properties] Properties to set
+ */
+ function GetTopologyPathRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * GetTopologyPathRequest path.
+ * @member {string} path
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @instance
+ */
+ GetTopologyPathRequest.prototype.path = "";
+
+ /**
+ * Creates a new GetTopologyPathRequest instance using the specified properties.
+ * @function create
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {vtctldata.IGetTopologyPathRequest=} [properties] Properties to set
+ * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest instance
+ */
+ GetTopologyPathRequest.create = function create(properties) {
+ return new GetTopologyPathRequest(properties);
+ };
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {vtctldata.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetTopologyPathRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.path != null && Object.hasOwnProperty.call(message, "path"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.path);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {vtctldata.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetTopologyPathRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetTopologyPathRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTopologyPathRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.path = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetTopologyPathRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a GetTopologyPathRequest message.
+ * @function verify
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {Object.<string,*>} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ GetTopologyPathRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.path != null && message.hasOwnProperty("path"))
+ if (!$util.isString(message.path))
+ return "path: string expected";
+ return null;
+ };
+
+ /**
+ * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {Object.<string,*>} object Plain object
+ * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest
+ */
+ GetTopologyPathRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtctldata.GetTopologyPathRequest)
+ return object;
+ var message = new $root.vtctldata.GetTopologyPathRequest();
+ if (object.path != null)
+ message.path = String(object.path);
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @static
+ * @param {vtctldata.GetTopologyPathRequest} message GetTopologyPathRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.<string,*>} Plain object
+ */
+ GetTopologyPathRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults)
+ object.path = "";
+ if (message.path != null && message.hasOwnProperty("path"))
+ object.path = message.path;
+ return object;
+ };
+
+ /**
+ * Converts this GetTopologyPathRequest to JSON.
+ * @function toJSON
+ * @memberof vtctldata.GetTopologyPathRequest
+ * @instance
+ * @returns {Object.<string,*>} JSON object
+ */
+ GetTopologyPathRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return GetTopologyPathRequest;
+ })();
+
+ vtctldata.GetTopologyPathResponse = (function() {
+
+ /**
+ * Properties of a GetTopologyPathResponse.
+ * @memberof vtctldata
+ * @interface IGetTopologyPathResponse
+ * @property {vtctldata.ITopologyCell|null} [cell] GetTopologyPathResponse cell
+ */
+
+ /**
+ * Constructs a new GetTopologyPathResponse.
+ * @memberof vtctldata
+ * @classdesc Represents a GetTopologyPathResponse.
+ * @implements IGetTopologyPathResponse
+ * @constructor
+ * @param {vtctldata.IGetTopologyPathResponse=} [properties] Properties to set
+ */
+ function GetTopologyPathResponse(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * GetTopologyPathResponse cell.
+ * @member {vtctldata.ITopologyCell|null|undefined} cell
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @instance
+ */
+ GetTopologyPathResponse.prototype.cell = null;
+
+ /**
+ * Creates a new GetTopologyPathResponse instance using the specified properties.
+ * @function create
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {vtctldata.IGetTopologyPathResponse=} [properties] Properties to set
+ * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse instance
+ */
+ GetTopologyPathResponse.create = function create(properties) {
+ return new GetTopologyPathResponse(properties);
+ };
+
+ /**
+ * Encodes the specified GetTopologyPathResponse message. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages.
+ * @function encode
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {vtctldata.IGetTopologyPathResponse} message GetTopologyPathResponse message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetTopologyPathResponse.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.cell != null && Object.hasOwnProperty.call(message, "cell"))
+ $root.vtctldata.TopologyCell.encode(message.cell, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim();
+ return writer;
+ };
+
+ /**
+ * Encodes the specified GetTopologyPathResponse message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {vtctldata.IGetTopologyPathResponse} message GetTopologyPathResponse message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ GetTopologyPathResponse.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a GetTopologyPathResponse message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetTopologyPathResponse.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTopologyPathResponse();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.cell = $root.vtctldata.TopologyCell.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a GetTopologyPathResponse message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ GetTopologyPathResponse.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a GetTopologyPathResponse message.
+ * @function verify
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {Object.<string,*>} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ GetTopologyPathResponse.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.cell != null && message.hasOwnProperty("cell")) {
+ var error = $root.vtctldata.TopologyCell.verify(message.cell);
+ if (error)
+ return "cell." + error;
+ }
+ return null;
+ };
+
+ /**
+ * Creates a GetTopologyPathResponse message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {Object.<string,*>} object Plain object
+ * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse
+ */
+ GetTopologyPathResponse.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtctldata.GetTopologyPathResponse)
+ return object;
+ var message = new $root.vtctldata.GetTopologyPathResponse();
+ if (object.cell != null) {
+ if (typeof object.cell !== "object")
+ throw TypeError(".vtctldata.GetTopologyPathResponse.cell: object expected");
+ message.cell = $root.vtctldata.TopologyCell.fromObject(object.cell);
+ }
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a GetTopologyPathResponse message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @static
+ * @param {vtctldata.GetTopologyPathResponse} message GetTopologyPathResponse
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.<string,*>} Plain object
+ */
+ GetTopologyPathResponse.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults)
+ object.cell = null;
+ if (message.cell != null && message.hasOwnProperty("cell"))
+ object.cell = $root.vtctldata.TopologyCell.toObject(message.cell, options);
+ return object;
+ };
+
+ /**
+ * Converts this GetTopologyPathResponse to JSON.
+ * @function toJSON
+ * @memberof vtctldata.GetTopologyPathResponse
+ * @instance
+ * @returns {Object.<string,*>} JSON object
+ */
+ GetTopologyPathResponse.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return GetTopologyPathResponse;
+ })();
+
+ vtctldata.TopologyCell = (function() {
+
+ /**
+ * Properties of a TopologyCell.
+ * @memberof vtctldata
+ * @interface ITopologyCell
+ * @property {string|null} [name] TopologyCell name
+ * @property {string|null} [path] TopologyCell path
+ * @property {string|null} [data] TopologyCell data
+ * @property {Array.<string>|null} [children] TopologyCell children
+ */
+
+ /**
+ * Constructs a new TopologyCell.
+ * @memberof vtctldata
+ * @classdesc Represents a TopologyCell.
+ * @implements ITopologyCell
+ * @constructor
+ * @param {vtctldata.ITopologyCell=} [properties] Properties to set
+ */
+ function TopologyCell(properties) {
+ this.children = [];
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * TopologyCell name.
+ * @member {string} name
+ * @memberof vtctldata.TopologyCell
+ * @instance
+ */
+ TopologyCell.prototype.name = "";
+
+ /**
+ * TopologyCell path.
+ * @member {string} path
+ * @memberof vtctldata.TopologyCell
+ * @instance
+ */
+ TopologyCell.prototype.path = "";
+
+ /**
+ * TopologyCell data.
+ * @member {string} data
+ * @memberof vtctldata.TopologyCell
+ * @instance
+ */
+ TopologyCell.prototype.data = "";
+
+ /**
+ * TopologyCell children.
+ * @member {Array.<string>} children
+ * @memberof vtctldata.TopologyCell
+ * @instance
+ */
+ TopologyCell.prototype.children = $util.emptyArray;
+
+ /**
+ * Creates a new TopologyCell instance using the specified properties.
+ * @function create
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {vtctldata.ITopologyCell=} [properties] Properties to set
+ * @returns {vtctldata.TopologyCell} TopologyCell instance
+ */
+ TopologyCell.create = function create(properties) {
+ return new TopologyCell(properties);
+ };
+
+ /**
+ * Encodes the specified TopologyCell message. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages.
+ * @function encode
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {vtctldata.ITopologyCell} message TopologyCell message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ TopologyCell.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.name != null && Object.hasOwnProperty.call(message, "name"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.name);
+ if (message.path != null && Object.hasOwnProperty.call(message, "path"))
+ writer.uint32(/* id 2, wireType 2 =*/18).string(message.path);
+ if (message.data != null && Object.hasOwnProperty.call(message, "data"))
+ writer.uint32(/* id 3, wireType 2 =*/26).string(message.data);
+ if (message.children != null && message.children.length)
+ for (var i = 0; i < message.children.length; ++i)
+ writer.uint32(/* id 4, wireType 2 =*/34).string(message.children[i]);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified TopologyCell message, length delimited. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {vtctldata.ITopologyCell} message TopologyCell message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ TopologyCell.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a TopologyCell message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtctldata.TopologyCell} TopologyCell
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ TopologyCell.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.TopologyCell();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 2:
+ message.path = reader.string();
+ break;
+ case 3:
+ message.data = reader.string();
+ break;
+ case 4:
+ if (!(message.children && message.children.length))
+ message.children = [];
+ message.children.push(reader.string());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a TopologyCell message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtctldata.TopologyCell} TopologyCell
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ TopologyCell.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a TopologyCell message.
+ * @function verify
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {Object.<string,*>} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ TopologyCell.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.name != null && message.hasOwnProperty("name"))
+ if (!$util.isString(message.name))
+ return "name: string expected";
+ if (message.path != null && message.hasOwnProperty("path"))
+ if (!$util.isString(message.path))
+ return "path: string expected";
+ if (message.data != null && message.hasOwnProperty("data"))
+ if (!$util.isString(message.data))
+ return "data: string expected";
+ if (message.children != null && message.hasOwnProperty("children")) {
+ if (!Array.isArray(message.children))
+ return "children: array expected";
+ for (var i = 0; i < message.children.length; ++i)
+ if (!$util.isString(message.children[i]))
+ return "children: string[] expected";
+ }
+ return null;
+ };
+
+ /**
+ * Creates a TopologyCell message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {Object.<string,*>} object Plain object
+ * @returns {vtctldata.TopologyCell} TopologyCell
+ */
+ TopologyCell.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtctldata.TopologyCell)
+ return object;
+ var message = new $root.vtctldata.TopologyCell();
+ if (object.name != null)
+ message.name = String(object.name);
+ if (object.path != null)
+ message.path = String(object.path);
+ if (object.data != null)
+ message.data = String(object.data);
+ if (object.children) {
+ if (!Array.isArray(object.children))
+ throw TypeError(".vtctldata.TopologyCell.children: array expected");
+ message.children = [];
+ for (var i = 0; i < object.children.length; ++i)
+ message.children[i] = String(object.children[i]);
+ }
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a TopologyCell message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtctldata.TopologyCell
+ * @static
+ * @param {vtctldata.TopologyCell} message TopologyCell
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.<string,*>} Plain object
+ */
+ TopologyCell.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.arrays || options.defaults)
+ object.children = [];
+ if (options.defaults) {
+ object.name = "";
+ object.path = "";
+ object.data = "";
+ }
+ if (message.name != null && message.hasOwnProperty("name"))
+ object.name = message.name;
+ if (message.path != null && message.hasOwnProperty("path"))
+ object.path = message.path;
+ if (message.data != null && message.hasOwnProperty("data"))
+ object.data = message.data;
+ if (message.children && message.children.length) {
+ object.children = [];
+ for (var j = 0; j < message.children.length; ++j)
+ object.children[j] = message.children[j];
+ }
+ return object;
+ };
+
+ /**
+ * Converts this TopologyCell to JSON.
+ * @function toJSON
+ * @memberof vtctldata.TopologyCell
+ * @instance
+ * @returns {Object.<string,*>} JSON object
+ */
+ TopologyCell.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return TopologyCell;
+ })();
+
vtctldata.GetVSchemaRequest = (function() {
/**
@@ -100587,6 +102781,7 @@ $root.vtctldata = (function() {
* @property {topodata.ITabletAlias|null} [new_primary] PlannedReparentShardRequest new_primary
* @property {topodata.ITabletAlias|null} [avoid_primary] PlannedReparentShardRequest avoid_primary
* @property {vttime.IDuration|null} [wait_replicas_timeout] PlannedReparentShardRequest wait_replicas_timeout
+ * @property {topodata.ITabletAlias|null} [expected_primary] PlannedReparentShardRequest expected_primary
*/
/**
@@ -100644,6 +102839,14 @@ $root.vtctldata = (function() {
*/
PlannedReparentShardRequest.prototype.wait_replicas_timeout = null;
+ /**
+ * PlannedReparentShardRequest expected_primary.
+ * @member {topodata.ITabletAlias|null|undefined} expected_primary
+ * @memberof vtctldata.PlannedReparentShardRequest
+ * @instance
+ */
+ PlannedReparentShardRequest.prototype.expected_primary = null;
+
/**
* Creates a new PlannedReparentShardRequest instance using the specified properties.
* @function create
@@ -100678,6 +102881,8 @@ $root.vtctldata = (function() {
$root.topodata.TabletAlias.encode(message.avoid_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim();
if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout"))
$root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim();
+ if (message.expected_primary != null && Object.hasOwnProperty.call(message, "expected_primary"))
+ $root.topodata.TabletAlias.encode(message.expected_primary, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim();
return writer;
};
@@ -100727,6 +102932,9 @@ $root.vtctldata = (function() {
case 5:
message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32());
break;
+ case 8:
+ message.expected_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -100783,6 +102991,11 @@ $root.vtctldata = (function() {
if (error)
return "wait_replicas_timeout." + error;
}
+ if (message.expected_primary != null && message.hasOwnProperty("expected_primary")) {
+ var error = $root.topodata.TabletAlias.verify(message.expected_primary);
+ if (error)
+ return "expected_primary." + error;
+ }
return null;
};
@@ -100817,6 +103030,11 @@ $root.vtctldata = (function() {
throw TypeError(".vtctldata.PlannedReparentShardRequest.wait_replicas_timeout: object expected");
message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout);
}
+ if (object.expected_primary != null) {
+ if (typeof object.expected_primary !== "object")
+ throw TypeError(".vtctldata.PlannedReparentShardRequest.expected_primary: object expected");
+ message.expected_primary = $root.topodata.TabletAlias.fromObject(object.expected_primary);
+ }
return message;
};
@@ -100839,6 +103057,7 @@ $root.vtctldata = (function() {
object.new_primary = null;
object.avoid_primary = null;
object.wait_replicas_timeout = null;
+ object.expected_primary = null;
}
if (message.keyspace != null && message.hasOwnProperty("keyspace"))
object.keyspace = message.keyspace;
@@ -100850,6 +103069,8 @@ $root.vtctldata = (function() {
object.avoid_primary = $root.topodata.TabletAlias.toObject(message.avoid_primary, options);
if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout"))
object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options);
+ if (message.expected_primary != null && message.hasOwnProperty("expected_primary"))
+ object.expected_primary = $root.topodata.TabletAlias.toObject(message.expected_primary, options);
return object;
};
@@ -105708,6 +107929,7 @@ $root.vtctldata = (function() {
* @interface IRestoreFromBackupRequest
* @property {topodata.ITabletAlias|null} [tablet_alias] RestoreFromBackupRequest tablet_alias
* @property {vttime.ITime|null} [backup_time] RestoreFromBackupRequest backup_time
+ * @property {Array.<string>|null} [allowed_backup_engines] RestoreFromBackupRequest allowed_backup_engines
*/
/**
@@ -105719,6 +107941,7 @@ $root.vtctldata = (function() {
* @param {vtctldata.IRestoreFromBackupRequest=} [properties] Properties to set
*/
function RestoreFromBackupRequest(properties) {
+ this.allowed_backup_engines = [];
if (properties)
for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
if (properties[keys[i]] != null)
@@ -105741,6 +107964,14 @@ $root.vtctldata = (function() {
*/
RestoreFromBackupRequest.prototype.backup_time = null;
+ /**
+ * RestoreFromBackupRequest allowed_backup_engines.
+ * @member {Array.<string>} allowed_backup_engines
+ * @memberof vtctldata.RestoreFromBackupRequest
+ * @instance
+ */
+ RestoreFromBackupRequest.prototype.allowed_backup_engines = $util.emptyArray;
+
/**
* Creates a new RestoreFromBackupRequest instance using the specified properties.
* @function create
@@ -105769,6 +108000,9 @@ $root.vtctldata = (function() {
$root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim();
if (message.backup_time != null && Object.hasOwnProperty.call(message, "backup_time"))
$root.vttime.Time.encode(message.backup_time, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
+ if (message.allowed_backup_engines != null && message.allowed_backup_engines.length)
+ for (var i = 0; i < message.allowed_backup_engines.length; ++i)
+ writer.uint32(/* id 6, wireType 2 =*/50).string(message.allowed_backup_engines[i]);
return writer;
};
@@ -105809,6 +108043,11 @@ $root.vtctldata = (function() {
case 2:
message.backup_time = $root.vttime.Time.decode(reader, reader.uint32());
break;
+ case 6:
+ if (!(message.allowed_backup_engines && message.allowed_backup_engines.length))
+ message.allowed_backup_engines = [];
+ message.allowed_backup_engines.push(reader.string());
+ break;
default:
reader.skipType(tag & 7);
break;
@@ -105854,6 +108093,13 @@ $root.vtctldata = (function() {
if (error)
return "backup_time." + error;
}
+ if (message.allowed_backup_engines != null && message.hasOwnProperty("allowed_backup_engines")) {
+ if (!Array.isArray(message.allowed_backup_engines))
+ return "allowed_backup_engines: array expected";
+ for (var i = 0; i < message.allowed_backup_engines.length; ++i)
+ if (!$util.isString(message.allowed_backup_engines[i]))
+ return "allowed_backup_engines: string[] expected";
+ }
return null;
};
@@ -105879,6 +108125,13 @@ $root.vtctldata = (function() {
throw TypeError(".vtctldata.RestoreFromBackupRequest.backup_time: object expected");
message.backup_time = $root.vttime.Time.fromObject(object.backup_time);
}
+ if (object.allowed_backup_engines) {
+ if (!Array.isArray(object.allowed_backup_engines))
+ throw TypeError(".vtctldata.RestoreFromBackupRequest.allowed_backup_engines: array expected");
+ message.allowed_backup_engines = [];
+ for (var i = 0; i < object.allowed_backup_engines.length; ++i)
+ message.allowed_backup_engines[i] = String(object.allowed_backup_engines[i]);
+ }
return message;
};
@@ -105895,6 +108148,8 @@ $root.vtctldata = (function() {
if (!options)
options = {};
var object = {};
+ if (options.arrays || options.defaults)
+ object.allowed_backup_engines = [];
if (options.defaults) {
object.tablet_alias = null;
object.backup_time = null;
@@ -105903,6 +108158,11 @@ $root.vtctldata = (function() {
object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options);
if (message.backup_time != null && message.hasOwnProperty("backup_time"))
object.backup_time = $root.vttime.Time.toObject(message.backup_time, options);
+ if (message.allowed_backup_engines && message.allowed_backup_engines.length) {
+ object.allowed_backup_engines = [];
+ for (var j = 0; j < message.allowed_backup_engines.length; ++j)
+ object.allowed_backup_engines[j] = message.allowed_backup_engines[j];
+ }
return object;
};
@@ -116749,6 +119009,419 @@ $root.vtctldata = (function() {
return ValidateVersionKeyspaceResponse;
})();
+ vtctldata.ValidateVersionShardRequest = (function() {
+
+ /**
+ * Properties of a ValidateVersionShardRequest.
+ * @memberof vtctldata
+ * @interface IValidateVersionShardRequest
+ * @property {string|null} [keyspace] ValidateVersionShardRequest keyspace
+ * @property {string|null} [shard] ValidateVersionShardRequest shard
+ */
+
+ /**
+ * Constructs a new ValidateVersionShardRequest.
+ * @memberof vtctldata
+ * @classdesc Represents a ValidateVersionShardRequest.
+ * @implements IValidateVersionShardRequest
+ * @constructor
+ * @param {vtctldata.IValidateVersionShardRequest=} [properties] Properties to set
+ */
+ function ValidateVersionShardRequest(properties) {
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * ValidateVersionShardRequest keyspace.
+ * @member {string} keyspace
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @instance
+ */
+ ValidateVersionShardRequest.prototype.keyspace = "";
+
+ /**
+ * ValidateVersionShardRequest shard.
+ * @member {string} shard
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @instance
+ */
+ ValidateVersionShardRequest.prototype.shard = "";
+
+ /**
+ * Creates a new ValidateVersionShardRequest instance using the specified properties.
+ * @function create
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {vtctldata.IValidateVersionShardRequest=} [properties] Properties to set
+ * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest instance
+ */
+ ValidateVersionShardRequest.create = function create(properties) {
+ return new ValidateVersionShardRequest(properties);
+ };
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages.
+ * @function encode
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {vtctldata.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateVersionShardRequest.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace"))
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace);
+ if (message.shard != null && Object.hasOwnProperty.call(message, "shard"))
+ writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {vtctldata.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateVersionShardRequest.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateVersionShardRequest.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionShardRequest();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.keyspace = reader.string();
+ break;
+ case 2:
+ message.shard = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateVersionShardRequest.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a ValidateVersionShardRequest message.
+ * @function verify
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {Object.<string,*>} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ ValidateVersionShardRequest.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.keyspace != null && message.hasOwnProperty("keyspace"))
+ if (!$util.isString(message.keyspace))
+ return "keyspace: string expected";
+ if (message.shard != null && message.hasOwnProperty("shard"))
+ if (!$util.isString(message.shard))
+ return "shard: string expected";
+ return null;
+ };
+
+ /**
+ * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {Object.<string,*>} object Plain object
+ * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest
+ */
+ ValidateVersionShardRequest.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtctldata.ValidateVersionShardRequest)
+ return object;
+ var message = new $root.vtctldata.ValidateVersionShardRequest();
+ if (object.keyspace != null)
+ message.keyspace = String(object.keyspace);
+ if (object.shard != null)
+ message.shard = String(object.shard);
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @static
+ * @param {vtctldata.ValidateVersionShardRequest} message ValidateVersionShardRequest
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.<string,*>} Plain object
+ */
+ ValidateVersionShardRequest.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.defaults) {
+ object.keyspace = "";
+ object.shard = "";
+ }
+ if (message.keyspace != null && message.hasOwnProperty("keyspace"))
+ object.keyspace = message.keyspace;
+ if (message.shard != null && message.hasOwnProperty("shard"))
+ object.shard = message.shard;
+ return object;
+ };
+
+ /**
+ * Converts this ValidateVersionShardRequest to JSON.
+ * @function toJSON
+ * @memberof vtctldata.ValidateVersionShardRequest
+ * @instance
+ * @returns {Object.<string,*>} JSON object
+ */
+ ValidateVersionShardRequest.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return ValidateVersionShardRequest;
+ })();
+
+ vtctldata.ValidateVersionShardResponse = (function() {
+
+ /**
+ * Properties of a ValidateVersionShardResponse.
+ * @memberof vtctldata
+ * @interface IValidateVersionShardResponse
+ * @property {Array.<string>|null} [results] ValidateVersionShardResponse results
+ */
+
+ /**
+ * Constructs a new ValidateVersionShardResponse.
+ * @memberof vtctldata
+ * @classdesc Represents a ValidateVersionShardResponse.
+ * @implements IValidateVersionShardResponse
+ * @constructor
+ * @param {vtctldata.IValidateVersionShardResponse=} [properties] Properties to set
+ */
+ function ValidateVersionShardResponse(properties) {
+ this.results = [];
+ if (properties)
+ for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
+ if (properties[keys[i]] != null)
+ this[keys[i]] = properties[keys[i]];
+ }
+
+ /**
+ * ValidateVersionShardResponse results.
+ * @member {Array.<string>} results
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @instance
+ */
+ ValidateVersionShardResponse.prototype.results = $util.emptyArray;
+
+ /**
+ * Creates a new ValidateVersionShardResponse instance using the specified properties.
+ * @function create
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {vtctldata.IValidateVersionShardResponse=} [properties] Properties to set
+ * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse instance
+ */
+ ValidateVersionShardResponse.create = function create(properties) {
+ return new ValidateVersionShardResponse(properties);
+ };
+
+ /**
+ * Encodes the specified ValidateVersionShardResponse message. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages.
+ * @function encode
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {vtctldata.IValidateVersionShardResponse} message ValidateVersionShardResponse message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateVersionShardResponse.encode = function encode(message, writer) {
+ if (!writer)
+ writer = $Writer.create();
+ if (message.results != null && message.results.length)
+ for (var i = 0; i < message.results.length; ++i)
+ writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]);
+ return writer;
+ };
+
+ /**
+ * Encodes the specified ValidateVersionShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages.
+ * @function encodeDelimited
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {vtctldata.IValidateVersionShardResponse} message ValidateVersionShardResponse message or plain object to encode
+ * @param {$protobuf.Writer} [writer] Writer to encode to
+ * @returns {$protobuf.Writer} Writer
+ */
+ ValidateVersionShardResponse.encodeDelimited = function encodeDelimited(message, writer) {
+ return this.encode(message, writer).ldelim();
+ };
+
+ /**
+ * Decodes a ValidateVersionShardResponse message from the specified reader or buffer.
+ * @function decode
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @param {number} [length] Message length if known beforehand
+ * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateVersionShardResponse.decode = function decode(reader, length) {
+ if (!(reader instanceof $Reader))
+ reader = $Reader.create(reader);
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionShardResponse();
+ while (reader.pos < end) {
+ var tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ if (!(message.results && message.results.length))
+ message.results = [];
+ message.results.push(reader.string());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ };
+
+ /**
+ * Decodes a ValidateVersionShardResponse message from the specified reader or buffer, length delimited.
+ * @function decodeDelimited
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from
+ * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse
+ * @throws {Error} If the payload is not a reader or valid buffer
+ * @throws {$protobuf.util.ProtocolError} If required fields are missing
+ */
+ ValidateVersionShardResponse.decodeDelimited = function decodeDelimited(reader) {
+ if (!(reader instanceof $Reader))
+ reader = new $Reader(reader);
+ return this.decode(reader, reader.uint32());
+ };
+
+ /**
+ * Verifies a ValidateVersionShardResponse message.
+ * @function verify
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {Object.<string,*>} message Plain object to verify
+ * @returns {string|null} `null` if valid, otherwise the reason why it is not
+ */
+ ValidateVersionShardResponse.verify = function verify(message) {
+ if (typeof message !== "object" || message === null)
+ return "object expected";
+ if (message.results != null && message.hasOwnProperty("results")) {
+ if (!Array.isArray(message.results))
+ return "results: array expected";
+ for (var i = 0; i < message.results.length; ++i)
+ if (!$util.isString(message.results[i]))
+ return "results: string[] expected";
+ }
+ return null;
+ };
+
+ /**
+ * Creates a ValidateVersionShardResponse message from a plain object. Also converts values to their respective internal types.
+ * @function fromObject
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {Object.<string,*>} object Plain object
+ * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse
+ */
+ ValidateVersionShardResponse.fromObject = function fromObject(object) {
+ if (object instanceof $root.vtctldata.ValidateVersionShardResponse)
+ return object;
+ var message = new $root.vtctldata.ValidateVersionShardResponse();
+ if (object.results) {
+ if (!Array.isArray(object.results))
+ throw TypeError(".vtctldata.ValidateVersionShardResponse.results: array expected");
+ message.results = [];
+ for (var i = 0; i < object.results.length; ++i)
+ message.results[i] = String(object.results[i]);
+ }
+ return message;
+ };
+
+ /**
+ * Creates a plain object from a ValidateVersionShardResponse message. Also converts values to other types if specified.
+ * @function toObject
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @static
+ * @param {vtctldata.ValidateVersionShardResponse} message ValidateVersionShardResponse
+ * @param {$protobuf.IConversionOptions} [options] Conversion options
+ * @returns {Object.<string,*>} Plain object
+ */
+ ValidateVersionShardResponse.toObject = function toObject(message, options) {
+ if (!options)
+ options = {};
+ var object = {};
+ if (options.arrays || options.defaults)
+ object.results = [];
+ if (message.results && message.results.length) {
+ object.results = [];
+ for (var j = 0; j < message.results.length; ++j)
+ object.results[j] = message.results[j];
+ }
+ return object;
+ };
+
+ /**
+ * Converts this ValidateVersionShardResponse to JSON.
+ * @function toJSON
+ * @memberof vtctldata.ValidateVersionShardResponse
+ * @instance
+ * @returns {Object.<string,*>} JSON object
+ */
+ ValidateVersionShardResponse.prototype.toJSON = function toJSON() {
+ return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
+ };
+
+ return ValidateVersionShardResponse;
+ })();
+
vtctldata.ValidateVSchemaRequest = (function() {
/**
@@ -120535,6 +123208,7 @@ $root.binlogdata = (function() {
* @property {number} VERSION=17 VERSION value
* @property {number} LASTPK=18 LASTPK value
* @property {number} SAVEPOINT=19 SAVEPOINT value
+ * @property {number} COPY_COMPLETED=20 COPY_COMPLETED value
*/
binlogdata.VEventType = (function() {
var valuesById = {}, values = Object.create(valuesById);
@@ -120558,6 +123232,7 @@ $root.binlogdata = (function() {
values[valuesById[17] = "VERSION"] = 17;
values[valuesById[18] = "LASTPK"] = 18;
values[valuesById[19] = "SAVEPOINT"] = 19;
+ values[valuesById[20] = "COPY_COMPLETED"] = 20;
return values;
})();
@@ -122800,6 +125475,7 @@ $root.binlogdata = (function() {
case 17:
case 18:
case 19:
+ case 20:
break;
}
if (message.timestamp != null && message.hasOwnProperty("timestamp"))
@@ -122947,6 +125623,10 @@ $root.binlogdata = (function() {
case 19:
message.type = 19;
break;
+ case "COPY_COMPLETED":
+ case 20:
+ message.type = 20;
+ break;
}
if (object.timestamp != null)
if ($util.Long)