diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index a56aad2f523..0c558b00684 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -72,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index 00aab1b78ff..680b0da87e0 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -76,7 +76,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml b/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml index 72426e70a61..dd389663a35 100644 --- a/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml +++ b/.github/workflows/upgrade_downgrade_test_onlineddl_flow.yml @@ -83,7 +83,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index 5ac1a55334c..e4ccc793933 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -75,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml new file mode 100644 index 00000000000..87a78d0b659 --- /dev/null +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml @@ -0,0 +1,212 @@ +name: Query Serving (Queries - 2) - Upgrade Downgrade Testing +on: + push: + pull_request: + +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Query Serving (Queries - 2)') + cancel-in-progress: true + +permissions: read-all + +# This test ensures that our end-to-end tests work using Vitess components +# (vtgate, vttablet, etc) built on different versions. 
+ +jobs: + + upgrade_downgrade_test: + name: Run Upgrade Downgrade Test - Query Serving (Queries - 2) + runs-on: gh-hosted-runners-16cores-1-24.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + - name: Check out commit's code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**' + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/upgrade_downgrade_test_query_serving_queries_2.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version: 1.23.4 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + + - name: Get base dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + # Install mysql80 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + # Install everything else we need, and configure + sudo apt-get install -y make unzip g++ etcd-client etcd-server curl git wget eatmydata + sudo service mysql stop + sudo service 
etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + # Build current commit's binaries + - name: Get dependencies for this commit + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + go mod download + + - name: Building the binaries for this commit + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 10 + run: | + source build.env + NOVTADMINBUILD=1 make build + mkdir -p /tmp/vitess-build-current/ + cp -R bin /tmp/vitess-build-current/ + rm -Rf bin/* + + # Checkout to the last release of Vitess + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} + + - name: Get dependencies for the last release + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + go mod download + + - name: Building last release's binaries + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 10 + run: | + source build.env + NOVTADMINBUILD=1 make build + mkdir -p /tmp/vitess-build-other/ + cp -R bin /tmp/vitess-build-other/ + rm -Rf bin/* + + - name: Convert ErrorContains checks to Error checks + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} + + find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} + + + # Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n + - name: Use last release's VTGate + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + source build.env + + cp -r /tmp/vitess-build-current/bin/* $PWD/bin/ + rm -f $PWD/bin/vtgate + cp /tmp/vitess-build-other/bin/vtgate $PWD/bin/vtgate + vtgate --version + + # Running a test with vtgate at version n-1 and vttablet/vtctld at version n + - name: Run query serving tests (vtgate=N-1, vttablet=N, vtctld=N) + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + rm -rf /tmp/vtdataroot + mkdir -p /tmp/vtdataroot + + source build.env + eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries_2 + + # Swap the binaries again. 
This time, vtgate will be at version n, and vttablet/vtctld will be at version n-1 + - name: Use current version VTGate, and other version VTTablet/VTctld + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + source build.env + + rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld + cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate + + cp /tmp/vitess-build-other/bin/vtctld $PWD/bin + cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin + cp /tmp/vitess-build-other/bin/vtctl $PWD/bin + cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin + + cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld + vtgate --version + vttablet --version + + # Running a test with vtgate at version n and vttablet/vtctld at version n-1 + - name: Run query serving tests (vtgate=N, vttablet=N-1, vtctld=N-1) + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + rm -rf /tmp/vtdataroot + mkdir -p /tmp/vtdataroot + + source build.env + eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries_2 diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml new file mode 100644 index 00000000000..d3cfc662b5b --- /dev/null +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_2_next_release.yml @@ -0,0 +1,208 @@ +name: Query Serving (Queries - 2) Next Release - Upgrade Downgrade Testing +on: + push: + pull_request: + +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Query Serving (Queries - 2) Next Release') + cancel-in-progress: true + +permissions: read-all + +# This test ensures that our end-to-end tests work using Vitess components +# (vtgate, vttablet, etc) built on different versions. + +jobs: + + upgrade_downgrade_test: + name: Run Upgrade Downgrade Test - Query Serving (Queries - 2) Next Release + runs-on: gh-hosted-runners-16cores-1-24.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check out commit's code + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**' + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version-file: go.mod + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + + - name: Get base dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + # Uninstall any nextly installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + # Install mysql80 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + # Install everything else we need, and configure + sudo apt-get install -y make unzip g++ etcd-client etcd-server curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + # Checkout to the next release of Vitess + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} + + - name: Get dependencies for the next release + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + go mod download + + - name: Building next 
release's binaries + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 10 + run: | + source build.env + NOVTADMINBUILD=1 make build + mkdir -p /tmp/vitess-build-other/ + cp -R bin /tmp/vitess-build-other/ + rm -Rf bin/* + + # Checkout to this build's commit + - name: Check out commit's code + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Get dependencies for this commit + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + go mod download + + - name: Building the binaries for this commit + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 10 + run: | + source build.env + NOVTADMINBUILD=1 make build + mkdir -p /tmp/vitess-build-current/ + cp -R bin /tmp/vitess-build-current/ + + - name: Convert ErrorContains checks to Error checks + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} + + find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} + + + # Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n + - name: Use next release's VTGate + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + source build.env + rm -f $PWD/bin/vtgate + cp /tmp/vitess-build-other/bin/vtgate $PWD/bin/vtgate + vtgate --version + + # Running a test with vtgate at version n+1 and vttablet at version n + - name: Run query serving tests (vtgate=N+1, vttablet=N) + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + rm -rf /tmp/vtdataroot + mkdir -p /tmp/vtdataroot + + source build.env + eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries_2 + + # Swap the binaries again. 
This time, vtgate will be at version n, and vttablet will be at version n+1 + - name: Use current version VTGate, and other version VTTablet + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + source build.env + + rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld + cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate + cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld + vtgate --version + vttablet --version + + # Running a test with vtgate at version n and vttablet at version n+1 + - name: Run query serving tests (vtgate=N, vttablet=N+1) + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + rm -rf /tmp/vtdataroot + mkdir -p /tmp/vtdataroot + + source build.env + eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries_2 diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 4b5fad0ab29..5616c247888 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -75,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index 701025d7ecc..532be0b998e 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -75,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index 8b121d4af10..2804e757652 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -75,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_semi_sync.yml b/.github/workflows/upgrade_downgrade_test_semi_sync.yml index f12e323654e..76706f77b65 100644 --- a/.github/workflows/upgrade_downgrade_test_semi_sync.yml +++ b/.github/workflows/upgrade_downgrade_test_semi_sync.yml @@ -72,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 
'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: 1.22.7 + go-version: 1.23.4 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/changelog/22.0/22.0.0/summary.md b/changelog/22.0/22.0.0/summary.md index 3f63b2d868f..e63ffcc3547 100644 --- a/changelog/22.0/22.0.0/summary.md +++ b/changelog/22.0/22.0.0/summary.md @@ -8,7 +8,10 @@ - **[RPC Changes](#rpc-changes)** - **[Prefer not promoting a replica that is currently taking a backup](#reparents-prefer-not-backing-up)** - **[VTOrc Config File Changes](#vtorc-config-file-changes)** + - **[VTGate Config File Changes](#vtgate-config-file-changes)** - **[Support for More Efficient JSON Replication](#efficient-json-replication)** + - **[Support for LAST_INSERT_ID(x)](#last-insert-id)** + - **[Support for Maximum Idle Connections in the Pool](#max-idle-connections)** - **[Minor Changes](#minor-changes)** - **[VTTablet Flags](#flags-vttablet)** - **[Topology read concurrency behaviour changes](#topo-read-concurrency-changes)** @@ -60,12 +63,42 @@ The following fields can be dynamically changed - To upgrade to the newer version of the configuration file, first switch to using the flags in your current deployment before upgrading. Then you can switch to using the configuration file in the newer release. +### VTGate Config File Changes + +The Viper configuration keys for the following flags have been changed to match their flag names. Previously, the keys carried a `discovery.` prefix; the prefix is now part of the key name itself. + +| Flag Name | Old Configuration Key | New Configuration Key | +|--------------------------------------------------|--------------------------------------------------|--------------------------------------------------| +| `discovery_low_replication_lag` | `discovery.low_replication_lag` | `discovery_low_replication_lag` | +| `discovery_high_replication_lag_minimum_serving` | `discovery.high_replication_lag_minimum_serving` | `discovery_high_replication_lag_minimum_serving` | +| `discovery_min_number_serving_vttablets` | `discovery.min_number_serving_vttablets` | `discovery_min_number_serving_vttablets` | +| `discovery_legacy_replication_lag_algorithm` | `discovery.legacy_replication_lag_algorithm` | `discovery_legacy_replication_lag_algorithm` | + +To upgrade to the newer version of the configuration keys, first switch to using the flags in your current deployment before upgrading. Then you can switch to using the new configuration keys in the newer release. + ### Support for More Efficient JSON Replication In [#17345](https://github.com/vitessio/vitess/pull/17345) we added support for [`--binlog-row-value-options=PARTIAL_JSON`](https://dev.mysql.com/doc/refman/en/replication-options-binary-log.html#sysvar_binlog_row_value_options). You can read more about [this feature added to MySQL 8.0 here](https://dev.mysql.com/blog-archive/efficient-json-replication-in-mysql-8-0/). If you are using MySQL 8.0 or later and using JSON columns, you can now enable this MySQL feature across your Vitess cluster(s) to lower the disk space needed for binary logs and improve the CPU and memory usage in both `mysqld` (standard intrashard MySQL replication) and `vttablet` ([VReplication](https://vitess.io/docs/reference/vreplication/vreplication/)) without losing any capabilities or features. 
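As a hedged aside (not part of the changelog above): `binlog_row_value_options` is a standard MySQL 8.0 system variable, so enabling the feature is a `mysqld`-side change. A minimal Go sketch for verifying the setting on a running instance; the `go-sql-driver/mysql` driver and the DSN are assumptions for illustration:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any MySQL driver works
)

func main() {
	// Hypothetical DSN; point it at a mysqld instance managed by Vitess.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// PARTIAL_JSON must be enabled on the MySQL side, for example by starting
	// mysqld with --binlog-row-value-options=PARTIAL_JSON.
	var val string
	if err := db.QueryRow("SELECT @@global.binlog_row_value_options").Scan(&val); err != nil {
		log.Fatal(err)
	}
	fmt.Println("binlog_row_value_options =", val) // expect "PARTIAL_JSON" once enabled
}
```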
+### Support for `LAST_INSERT_ID(x)` + +In [#17408](https://github.com/vitessio/vitess/pull/17408) and [#17409](https://github.com/vitessio/vitess/pull/17409), we added the ability to use `LAST_INSERT_ID(x)` in Vitess directly at vtgate. This improvement allows certain queries, such as `SELECT last_insert_id(123);` or `SELECT last_insert_id(count(*)) ...`, to be handled without relying on MySQL for the final value (a hedged client-side sketch follows the changelog additions below). + +**Limitations**: +- When using `LAST_INSERT_ID(x)` in ordered queries (e.g., `SELECT last_insert_id(col) FROM table ORDER BY foo`), MySQL sets the session’s last-insert-id value according to the *last row returned*. Vitess does not guarantee the same behavior. + +### Support for Maximum Idle Connections in the Pool + +In [#17443](https://github.com/vitessio/vitess/pull/17443) we introduced a new configurable `max-idle-count` parameter for connection pools. This allows you to specify the maximum number of idle connections retained in each connection pool to optimize performance and resource efficiency. + +You can control idle connection retention for the query server’s query pool, stream pool, and transaction pool with the following flags: +- `--queryserver-config-query-pool-max-idle-count`: Defines the maximum number of idle connections retained in the query pool. +- `--queryserver-config-stream-pool-max-idle-count`: Defines the maximum number of idle connections retained in the stream pool. +- `--queryserver-config-txpool-max-idle-count`: Defines the maximum number of idle connections retained in the transaction pool. + +This feature ensures that, during traffic spikes, idle connections are available for faster responses, while minimizing overhead in low-traffic periods by limiting the number of idle connections retained. It helps strike a balance between performance, efficiency, and cost. 
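The client-side sketch referenced in the `LAST_INSERT_ID(x)` section above, assuming a vtgate listening on its MySQL port; the driver, DSN, and keyspace name are illustrative assumptions, and the code only exercises documented MySQL session semantics:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver
)

func main() {
	ctx := context.Background()

	// Hypothetical DSN pointing at vtgate's MySQL listener.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:15306)/commerce")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pin a single connection so both statements share one session's state.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// With this change, vtgate can evaluate LAST_INSERT_ID(123) itself and
	// update the session without relying on MySQL for the final value.
	var id uint64
	if err := conn.QueryRowContext(ctx, "SELECT last_insert_id(123)").Scan(&id); err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // 123

	// The session value remains observable afterwards, as in plain MySQL.
	if err := conn.QueryRowContext(ctx, "SELECT last_insert_id()").Scan(&id); err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // 123
}
```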
## Minor Changes diff --git a/examples/backups/create_commerce_schema.sql b/examples/backups/create_commerce_schema.sql index f24689a068f..dfb785200c8 100644 --- a/examples/backups/create_commerce_schema.sql +++ b/examples/backups/create_commerce_schema.sql @@ -1,6 +1,6 @@ create table if not exists product( - sku varbinary(128), - description varbinary(128), + sku varchar(128), + description varchar(128), price bigint, primary key(sku) ) ENGINE=InnoDB; diff --git a/examples/backups/create_customer_schema.sql b/examples/backups/create_customer_schema.sql index 166a56e4c02..f04e6186d8f 100644 --- a/examples/backups/create_customer_schema.sql +++ b/examples/backups/create_customer_schema.sql @@ -1,13 +1,13 @@ create table if not exists customer( customer_id bigint not null, - email varbinary(128), + email varchar(128), primary key(customer_id) ) ENGINE=InnoDB; create table if not exists corder( order_id bigint not null, customer_id bigint, - sku varbinary(128), + sku varchar(128), price bigint, primary key(order_id) ) ENGINE=InnoDB; diff --git a/examples/compose/default_vschema.json b/examples/compose/default_vschema.json index e0b50a66037..0ce3aa0868c 100644 --- a/examples/compose/default_vschema.json +++ b/examples/compose/default_vschema.json @@ -1,8 +1,8 @@ { "sharded": false, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" } } -} +} \ No newline at end of file diff --git a/examples/compose/lookup_keyspace_vschema.json b/examples/compose/lookup_keyspace_vschema.json index f67289821fe..cf0087adc51 100644 --- a/examples/compose/lookup_keyspace_vschema.json +++ b/examples/compose/lookup_keyspace_vschema.json @@ -5,7 +5,7 @@ "column_vindexes": [ { "column": "id", - "name": "hash" + "name": "xxhash" } ] }, @@ -13,14 +13,14 @@ "column_vindexes": [ { "column": "id", - "name": "hash" + "name": "xxhash" } ] } }, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" } } } \ No newline at end of file diff --git a/examples/compose/test_keyspace_vschema.json b/examples/compose/test_keyspace_vschema.json index 55d0df96204..d79a31e854c 100644 --- a/examples/compose/test_keyspace_vschema.json +++ b/examples/compose/test_keyspace_vschema.json @@ -5,7 +5,7 @@ "column_vindexes": [ { "column": "page", - "name": "hash" + "name": "xxhash" }, { "column": "message", @@ -17,7 +17,7 @@ "column_vindexes": [ { "column": "page", - "name": "hash" + "name": "xxhash" }, { "column": "token", @@ -27,8 +27,8 @@ } }, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" }, "messages_message_lookup": { "type": "lookup_hash", diff --git a/examples/compose/vtcompose/base_vschema.json b/examples/compose/vtcompose/base_vschema.json index b867400e5ee..a24905a9411 100644 --- a/examples/compose/vtcompose/base_vschema.json +++ b/examples/compose/vtcompose/base_vschema.json @@ -1,10 +1,9 @@ { "sharded": true, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" } }, - "tables": { - } -} + "tables": {} +} \ No newline at end of file diff --git a/examples/demo/schema/customer/vschema.json b/examples/demo/schema/customer/vschema.json index 9c361475227..4a8a4941612 100644 --- a/examples/demo/schema/customer/vschema.json +++ b/examples/demo/schema/customer/vschema.json @@ -1,8 +1,8 @@ { "sharded": true, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" }, "corder_keyspace_idx": { "type": "consistent_lookup_unique", @@ -31,49 +31,63 @@ }, "tables": { "customer": { - "column_vindexes": [{ - "column": "customer_id", - 
"name": "hash" - }], + "column_vindexes": [ + { + "column": "customer_id", + "name": "xxhash" + } + ], "auto_increment": { "column": "customer_id", "sequence": "product.customer_seq" } }, "corder": { - "column_vindexes": [{ - "column": "customer_id", - "name": "hash" - }, { - "column": "corder_id", - "name": "corder_keyspace_idx" - }, { - "columns": ["oname", "corder_id"], - "name": "oname_keyspace_idx" - }], + "column_vindexes": [ + { + "column": "customer_id", + "name": "xxhash" + }, + { + "column": "corder_id", + "name": "corder_keyspace_idx" + }, + { + "columns": [ + "oname", + "corder_id" + ], + "name": "oname_keyspace_idx" + } + ], "auto_increment": { "column": "corder_id", "sequence": "product.corder_seq" } }, "corder_event": { - "column_vindexes": [{ - "column": "corder_id", - "name": "corder_keyspace_idx" - }, { - "column": "keyspace_id", - "name": "binary" - }], + "column_vindexes": [ + { + "column": "corder_id", + "name": "corder_keyspace_idx" + }, + { + "column": "keyspace_id", + "name": "binary" + } + ], "auto_increment": { "column": "corder_event_id", "sequence": "product.corder_event_seq" } }, "oname_keyspace_idx": { - "column_vindexes": [{ - "column": "oname", - "name": "unicode_loose_md5" - }] + "column_vindexes": [ + { + "column": "oname", + "name": "unicode_loose_md5" + } + ] } } -} +} \ No newline at end of file diff --git a/examples/local/401_backup.sh b/examples/local/401_backup.sh new file mode 100755 index 00000000000..1529440ea26 --- /dev/null +++ b/examples/local/401_backup.sh @@ -0,0 +1,40 @@ +# Copyright 2025 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script takes backups of the 'customer' keyspace and all its shards. + +# Load common environment variables and functions +source ../common/env.sh + +# Set keyspace and shard details for the 'customer' keyspace +KEYSPACE="customer" +SHARDS=("-80" "80-") + +# Ensure the keyspace and shards are healthy +echo "Ensuring keyspace $KEYSPACE exists and shards are healthy..." +for shard in "${SHARDS[@]}"; do + if ! wait_for_healthy_shard "$KEYSPACE" "$shard"; then + echo "Shard $shard is not healthy. Exiting..." + exit 1 + fi +done + +# Backup all shards of the customer keyspace +for shard in "${SHARDS[@]}"; do + echo "Backing up shard $shard in keyspace $KEYSPACE..." + vtctldclient BackupShard "$KEYSPACE/$shard" || fail "Backup failed for shard $shard." + echo "Backup succeeded for shard $shard." +done + +echo "Backup process completed successfully for all shards in $KEYSPACE." diff --git a/examples/local/402_list_backup.sh b/examples/local/402_list_backup.sh new file mode 100755 index 00000000000..c19dc5f6df1 --- /dev/null +++ b/examples/local/402_list_backup.sh @@ -0,0 +1,28 @@ +# Copyright 2025 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Load common environment variables and functions +source ../common/env.sh # Import necessary environment variables and functions from a common script + +# Set keyspace and shard details for the 'customer' keyspace +KEYSPACE="customer" # Define the keyspace to work with +SHARDS=("-80" "80-") # Define the shards within the keyspace to list backups for + +# List backups for each shard +for shard in "${SHARDS[@]}"; do # Loop through each shard defined earlier + echo "Listing available backups for keyspace $KEYSPACE and shard $shard..." # Log the start of the backup listing + vtctldclient GetBackups "$KEYSPACE/$shard" || echo "Failed to list backups for keyspace $KEYSPACE and shard $shard" # Attempt to list backups; log failure if it occurs +done + +echo "Backup listing process completed." # Log completion of the backup listing process diff --git a/examples/local/403_restore_from_backup.sh b/examples/local/403_restore_from_backup.sh new file mode 100755 index 00000000000..73fafd34aa0 --- /dev/null +++ b/examples/local/403_restore_from_backup.sh @@ -0,0 +1,46 @@ +# Copyright 2025 The Vitess Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script restores the first replica tablet from backups for the 'customer' keyspace. + +# Load common environment variables and functions +source ../common/env.sh # Import necessary environment variables and functions from a common script + +# Set keyspace and shard details for the 'customer' keyspace +KEYSPACE="customer" # Define the keyspace to work with +SHARDS=("-80" "80-") # Define the shards within the keyspace to restore + +# Restore all shards of the customer keyspace from backups +for shard in "${SHARDS[@]}"; do # Loop through each shard defined earlier + echo "Finding replica tablets for shard $shard..." # Log the start of the tablet search + + # Fetch the list of replica tablets for the current shard + REPLICA_TABLETS=$(vtctldclient GetTablets --keyspace="$KEYSPACE" --shard="$shard" --tablet-type=replica | awk '{print $1}') # Extract the first column containing tablet aliases + REPLICA_COUNT=$(echo "$REPLICA_TABLETS" | grep -c .) # Count non-empty lines; 'wc -l' would report 1 even when the list is empty + + # Check if any replica tablets were found + if [ "$REPLICA_COUNT" -lt 1 ]; then # If the count is less than 1, no replicas were found + echo "No replica tablets found for shard $shard. Exiting..." 
# Log a message and exit if none are found + exit 1 # Exit the script with an error code + fi + + # Choose the first replica for restoration + RESTORE_TABLET=$(echo "$REPLICA_TABLETS" | head -n 1) # Select the first replica tablet from the list + echo "Restoring tablet $RESTORE_TABLET from backup for shard $shard..." # Log the restoration action + + # Restore from backup and handle any failures + vtctldclient RestoreFromBackup "$RESTORE_TABLET" || fail "Restore failed for tablet $RESTORE_TABLET" # Attempt to restore from backup and log an error message if it fails +done + +echo "Restore process completed successfully for $KEYSPACE." # Log completion of the restore process diff --git a/examples/local/401_teardown.sh b/examples/local/501_teardown.sh similarity index 100% rename from examples/local/401_teardown.sh rename to examples/local/501_teardown.sh diff --git a/examples/local/create_commerce_schema.sql b/examples/local/create_commerce_schema.sql index d8768d88f82..d936d4e82ba 100644 --- a/examples/local/create_commerce_schema.sql +++ b/examples/local/create_commerce_schema.sql @@ -1,18 +1,18 @@ create table if not exists product( - sku varbinary(128), - description varbinary(128), + sku varchar(128), + description varchar(128), price bigint, primary key(sku) ) ENGINE=InnoDB; create table if not exists customer( customer_id bigint not null auto_increment, - email varbinary(128), + email varchar(128), primary key(customer_id) ) ENGINE=InnoDB; create table if not exists corder( order_id bigint not null auto_increment, customer_id bigint, - sku varbinary(128), + sku varchar(128), price bigint, primary key(order_id) ) ENGINE=InnoDB; diff --git a/examples/local/vschema.json b/examples/local/vschema.json index 17e5dedf0c8..4b922a0f72e 100644 --- a/examples/local/vschema.json +++ b/examples/local/vschema.json @@ -1,8 +1,8 @@ { "sharded": true, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" } }, "tables": { @@ -10,9 +10,9 @@ "column_vindexes": [ { "column": "page", - "name": "hash" + "name": "xxhash" } ] } } -} +} \ No newline at end of file diff --git a/examples/local/vschema_customer_sharded.json b/examples/local/vschema_customer_sharded.json index 3109e2a2f3c..d1c147e1892 100644 --- a/examples/local/vschema_customer_sharded.json +++ b/examples/local/vschema_customer_sharded.json @@ -1,8 +1,8 @@ { "sharded": true, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" } }, "tables": { @@ -10,7 +10,7 @@ "column_vindexes": [ { "column": "customer_id", - "name": "hash" + "name": "xxhash" } ], "auto_increment": { @@ -22,7 +22,7 @@ "column_vindexes": [ { "column": "customer_id", - "name": "hash" + "name": "xxhash" } ], "auto_increment": { @@ -31,4 +31,4 @@ } } } -} +} \ No newline at end of file diff --git a/examples/operator/create_commerce_schema.sql b/examples/operator/create_commerce_schema.sql index d8768d88f82..d936d4e82ba 100644 --- a/examples/operator/create_commerce_schema.sql +++ b/examples/operator/create_commerce_schema.sql @@ -1,18 +1,18 @@ create table if not exists product( - sku varbinary(128), - description varbinary(128), + sku varchar(128), + description varchar(128), price bigint, primary key(sku) ) ENGINE=InnoDB; create table if not exists customer( customer_id bigint not null auto_increment, - email varbinary(128), + email varchar(128), primary key(customer_id) ) ENGINE=InnoDB; create table if not exists corder( order_id bigint not null auto_increment, customer_id bigint, - sku varbinary(128), + sku varchar(128), price bigint, 
primary key(order_id) ) ENGINE=InnoDB; diff --git a/examples/operator/vschema_customer_sharded.json b/examples/operator/vschema_customer_sharded.json index 3109e2a2f3c..d1c147e1892 100644 --- a/examples/operator/vschema_customer_sharded.json +++ b/examples/operator/vschema_customer_sharded.json @@ -1,8 +1,8 @@ { "sharded": true, "vindexes": { - "hash": { - "type": "hash" + "xxhash": { + "type": "xxhash" } }, "tables": { @@ -10,7 +10,7 @@ "column_vindexes": [ { "column": "customer_id", - "name": "hash" + "name": "xxhash" } ], "auto_increment": { @@ -22,7 +22,7 @@ "column_vindexes": [ { "column": "customer_id", - "name": "hash" + "name": "xxhash" } ], "auto_increment": { @@ -31,4 +31,4 @@ } } } -} +} \ No newline at end of file diff --git a/examples/region_sharding/create_main_schema.sql b/examples/region_sharding/create_main_schema.sql index 9ee4f8d9450..ba91d74f45c 100644 --- a/examples/region_sharding/create_main_schema.sql +++ b/examples/region_sharding/create_main_schema.sql @@ -1,7 +1,7 @@ CREATE TABLE IF NOT EXISTS customer ( id int NOT NULL, - fullname varbinary(256), - nationalid varbinary(256), - country varbinary(256), + fullname varchar(256), + nationalid varchar(256), + country varchar(256), primary key(id) ); diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go index 565e0c8aa82..4d1156291cf 100644 --- a/go/cmd/vtctldclient/command/keyspaces.go +++ b/go/cmd/vtctldclient/command/keyspaces.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/constants/sidecar" @@ -153,7 +154,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { var snapshotTime *vttime.Time if topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType) == topodatapb.KeyspaceType_SNAPSHOT { - if createKeyspaceOptions.DurabilityPolicy != "none" { + if createKeyspaceOptions.DurabilityPolicy != policy.DurabilityNone { return errors.New("--durability-policy cannot be specified while creating a snapshot keyspace") } @@ -409,7 +410,7 @@ func init() { CreateKeyspace.Flags().Var(&createKeyspaceOptions.KeyspaceType, "type", "The type of the keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") - CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", policy.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. 
Possible values include 'semi_sync' and others as dictated by registered plugins.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") Root.AddCommand(CreateKeyspace) @@ -425,7 +426,7 @@ func init() { RemoveKeyspaceCell.Flags().BoolVarP(&removeKeyspaceCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell beloning to the specified keyspace.") Root.AddCommand(RemoveKeyspaceCell) - SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.") + SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", policy.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.") Root.AddCommand(SetKeyspaceDurabilityPolicy) ValidateSchemaKeyspace.Flags().BoolVar(&validateSchemaKeyspaceOptions.IncludeViews, "include-views", false, "Includes views in compared schemas.") diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index 1c57dd0c08e..052c19ecaae 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -282,11 +282,13 @@ Flags: --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) + --queryserver-config-query-pool-max-idle-count int query server query pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods. --queryserver-config-query-pool-timeout duration query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. --queryserver-config-query-timeout duration query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. 
(default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) + --queryserver-config-stream-pool-max-idle-count int query server stream pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods. --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) --queryserver-config-stream-pool-timeout duration query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. --queryserver-config-strict-table-acl only allow queries that pass table acl checks @@ -294,6 +296,7 @@ Flags: --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) --queryserver-config-transaction-timeout duration query server transaction timeout, a transaction will be killed if it takes longer than this value (default 30s) --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) + --queryserver-config-txpool-max-idle-count int query server transaction pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods. --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this --queryserver-enable-views Enable views support in vttablet. diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index bc647fb5347..e2b0c30db7f 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -274,11 +274,13 @@ Flags: --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) + --queryserver-config-query-pool-max-idle-count int query server query pool - maximum number of idle connections to retain in the pool. 
Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods. --queryserver-config-query-pool-timeout duration query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. --queryserver-config-query-timeout duration query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) + --queryserver-config-stream-pool-max-idle-count int query server stream pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods. --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) --queryserver-config-stream-pool-timeout duration query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. --queryserver-config-strict-table-acl only allow queries that pass table acl checks @@ -286,6 +288,7 @@ Flags: --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) --queryserver-config-transaction-timeout duration query server transaction timeout, a transaction will be killed if it takes longer than this value (default 30s) --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) + --queryserver-config-txpool-max-idle-count int query server transaction pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods. --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this --queryserver-enable-views Enable views support in vttablet. 
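Before the `smartconnpool` hunk below, a self-contained sketch of the clamping rule that the new `setIdleCount` method applies: a `MaxIdleCount` of zero, or one larger than the pool's capacity, means the idle limit falls back to the full capacity. The logic mirrors the diff; the standalone function name is illustrative, not part of the pool's API:

```go
package main

import "fmt"

// effectiveIdleLimit mirrors ConnPool.setIdleCount from the hunk below:
// zero (or a value above capacity) disables the extra limit, so the pool
// may retain up to its full capacity as idle connections.
func effectiveIdleLimit(capacity, maxIdleCount int64) int64 {
	if maxIdleCount == 0 || maxIdleCount > capacity {
		return capacity
	}
	return maxIdleCount
}

func main() {
	for _, tc := range []struct{ capacity, maxIdle int64 }{
		{5, 0}, // unset: keep up to 5 idle
		{5, 2}, // clamp: keep at most 2 idle, close the rest on put()
		{5, 8}, // above capacity: behaves as unset
	} {
		fmt.Printf("capacity=%d maxIdleCount=%d -> idle limit %d\n",
			tc.capacity, tc.maxIdle, effectiveIdleLimit(tc.capacity, tc.maxIdle))
	}
}
```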
diff --git a/go/pools/smartconnpool/pool.go b/go/pools/smartconnpool/pool.go index d49032f34a1..b024cc656df 100644 --- a/go/pools/smartconnpool/pool.go +++ b/go/pools/smartconnpool/pool.go @@ -92,6 +92,7 @@ type RefreshCheck func() (bool, error) type Config[C Connection] struct { Capacity int64 + MaxIdleCount int64 IdleTimeout time.Duration MaxLifetime time.Duration RefreshInterval time.Duration @@ -123,6 +124,8 @@ type ConnPool[C Connection] struct { active atomic.Int64 // capacity is the maximum number of connections that this pool can open capacity atomic.Int64 + // idleCount is the effective limit on idle connections retained in the pool + idleCount atomic.Int64 // workers is a waitgroup for all the currently running worker goroutines workers sync.WaitGroup @@ -138,6 +141,8 @@ type ConnPool[C Connection] struct { // maxCapacity is the maximum value to which capacity can be set; when the pool // is re-opened, it defaults to this capacity maxCapacity int64 + // maxIdleCount is the maximum number of idle connections allowed in the pool + maxIdleCount int64 // maxLifetime is the maximum time a connection can be open maxLifetime atomic.Int64 // idleTimeout is the maximum time a connection can remain idle @@ -158,6 +163,7 @@ func NewPool[C Connection](config *Config[C]) *ConnPool[C] { pool := &ConnPool[C]{} pool.freshSettingsStack.Store(-1) pool.config.maxCapacity = config.Capacity + pool.config.maxIdleCount = config.MaxIdleCount pool.config.maxLifetime.Store(config.MaxLifetime.Nanoseconds()) pool.config.idleTimeout.Store(config.IdleTimeout.Nanoseconds()) pool.config.refreshInterval.Store(config.RefreshInterval.Nanoseconds()) @@ -192,6 +198,7 @@ func (pool *ConnPool[C]) runWorker(close <-chan struct{}, interval time.Duration func (pool *ConnPool[C]) open() { pool.close = make(chan struct{}) pool.capacity.Store(pool.config.maxCapacity) + pool.setIdleCount() // The expire worker takes care of removing from the waiter list any clients whose // context has been cancelled. @@ -315,6 +322,16 @@ func (pool *ConnPool[C]) MaxCapacity() int64 { return pool.config.maxCapacity } +func (pool *ConnPool[C]) setIdleCount() { + capacity := pool.Capacity() + maxIdleCount := pool.config.maxIdleCount + if maxIdleCount == 0 || maxIdleCount > capacity { + pool.idleCount.Store(capacity) + } else { + pool.idleCount.Store(maxIdleCount) + } +} + // InUse returns the number of connections that the pool has lent out to clients and that // haven't been returned yet. func (pool *ConnPool[C]) InUse() int64 { @@ -340,6 +357,10 @@ func (pool *ConnPool[C]) SetIdleTimeout(duration time.Duration) { pool.config.idleTimeout.Store(duration.Nanoseconds()) } +func (pool *ConnPool[D]) IdleCount() int64 { + return pool.idleCount.Load() +} + func (pool *ConnPool[D]) RefreshInterval() time.Duration { return time.Duration(pool.config.refreshInterval.Load()) } @@ -396,6 +417,10 @@ func (pool *ConnPool[C]) put(conn *Pooled[C]) { } if !pool.wait.tryReturnConn(conn) { + if pool.closeOnIdleLimitReached(conn) { + return + } + connSetting := conn.Conn.Setting() if connSetting == nil { pool.clean.Push(conn) @@ -407,6 +432,23 @@ } } +// closeOnIdleLimitReached closes a connection if the number of idle connections (active - inuse) in the pool +// exceeds the idleCount limit. It returns true if the connection is closed, false otherwise. 
+func (pool *ConnPool[C]) closeOnIdleLimitReached(conn *Pooled[C]) bool { + for { + open := pool.active.Load() + idle := open - pool.borrowed.Load() + if idle <= pool.idleCount.Load() { + return false + } + if pool.active.CompareAndSwap(open, open-1) { + pool.Metrics.idleClosed.Add(1) + conn.Close() + return true + } + } +} + func (pool *ConnPool[D]) extendedMaxLifetime() time.Duration { maxLifetime := pool.config.maxLifetime.Load() if maxLifetime == 0 { @@ -629,6 +671,9 @@ func (pool *ConnPool[C]) setCapacity(ctx context.Context, newcap int64) error { if oldcap == newcap { return nil } + // update the idle count to match the new capacity if necessary + // wait for connections to be returned to the pool if we're reducing the capacity. + defer pool.setIdleCount() const delay = 10 * time.Millisecond @@ -732,6 +777,9 @@ func (pool *ConnPool[C]) RegisterStats(stats *servenv.Exporter, name string) { // the smartconnpool doesn't have a maximum capacity return pool.Capacity() }) + stats.NewGaugeFunc(name+"IdleAllowed", "Tablet server conn pool idle allowed limit", func() int64 { + return pool.IdleCount() + }) stats.NewCounterFunc(name+"WaitCount", "Tablet server conn pool wait count", func() int64 { return pool.Metrics.WaitCount() }) diff --git a/go/pools/smartconnpool/pool_test.go b/go/pools/smartconnpool/pool_test.go index 701327005ad..44bd431d189 100644 --- a/go/pools/smartconnpool/pool_test.go +++ b/go/pools/smartconnpool/pool_test.go @@ -746,6 +746,51 @@ func TestExtendedLifetimeTimeout(t *testing.T) { } } +// TestMaxIdleCount tests the MaxIdleCount setting, to check if the pool closes +// the idle connections when the number of idle connections exceeds the limit. +func TestMaxIdleCount(t *testing.T) { + testMaxIdleCount := func(t *testing.T, setting *Setting, maxIdleCount int64, expClosedConn int) { + var state TestState + + ctx := context.Background() + p := NewPool(&Config[*TestConn]{ + Capacity: 5, + MaxIdleCount: maxIdleCount, + LogWait: state.LogWait, + }).Open(newConnector(&state), nil) + + defer p.Close() + + var conns []*Pooled[*TestConn] + for i := 0; i < 5; i++ { + r, err := p.Get(ctx, setting) + require.NoError(t, err) + assert.EqualValues(t, i+1, state.open.Load()) + assert.EqualValues(t, 0, p.Metrics.IdleClosed()) + + conns = append(conns, r) + } + + for _, conn := range conns { + p.put(conn) + } + + closedConn := 0 + for _, conn := range conns { + if conn.Conn.IsClosed() { + closedConn++ + } + } + assert.EqualValues(t, expClosedConn, closedConn) + assert.EqualValues(t, expClosedConn, p.Metrics.IdleClosed()) + } + + t.Run("WithoutSettings", func(t *testing.T) { testMaxIdleCount(t, nil, 2, 3) }) + t.Run("WithSettings", func(t *testing.T) { testMaxIdleCount(t, sFoo, 2, 3) }) + t.Run("WithoutSettings-MaxIdleCount-Zero", func(t *testing.T) { testMaxIdleCount(t, nil, 0, 0) }) + t.Run("WithSettings-MaxIdleCount-Zero", func(t *testing.T) { testMaxIdleCount(t, sFoo, 0, 0) }) +} + func TestCreateFail(t *testing.T) { var state TestState state.chaos.failConnect = true diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index cfc2071a746..d449b4c64f7 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -380,11 +380,11 @@ func ExecuteOnTablet(t *testing.T, query string, vttablet Vttablet, ks string, e _, _ = vttablet.VttabletProcess.QueryTablet("commit", ks, true) } -func WaitForTabletSetup(vtctlClientProcess *VtctlClientProcess, expectedTablets int, expectedStatus []string) error { +func 
WaitForTabletSetup(vtctldClientProcess *VtctldClientProcess, expectedTablets int, expectedStatus []string) error { // wait for both tablet to get into replica state in topo waitUntil := time.Now().Add(10 * time.Second) for time.Now().Before(waitUntil) { - result, err := vtctlClientProcess.ExecuteCommandWithOutput("ListAllTablets") + result, err := vtctldClientProcess.ExecuteCommandWithOutput("GetTablets") if err != nil { return err } diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index 1290156a1cd..2adbdf13250 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -28,6 +28,7 @@ import ( "strconv" "strings" "syscall" + "testing" "time" "vitess.io/vitess/go/vt/log" @@ -57,6 +58,8 @@ type VtgateProcess struct { Directory string VerifyURL string VSchemaURL string + ConfigFile string + Config VTGateConfiguration SysVarSetEnabled bool PlannerVersion plancontext.PlannerVersion // Extra Args to be set before starting the vtgate process @@ -66,6 +69,81 @@ type VtgateProcess struct { exit chan error } +type VTGateConfiguration struct { + TransactionMode string `json:"transaction_mode,omitempty"` + DiscoveryLowReplicationLag string `json:"discovery_low_replication_lag,omitempty"` + DiscoveryHighReplicationLag string `json:"discovery_high_replication_lag,omitempty"` + DiscoveryMinServingVttablets string `json:"discovery_min_number_serving_vttablets,omitempty"` + DiscoveryLegacyReplicationLagAlgo string `json:"discovery_legacy_replication_lag_algorithm"` +} + +// ToJSONString will marshal this configuration as JSON +func (config *VTGateConfiguration) ToJSONString() string { + b, _ := json.MarshalIndent(config, "", "\t") + return string(b) +} + +func (vtgate *VtgateProcess) RewriteConfiguration() error { + return os.WriteFile(vtgate.ConfigFile, []byte(vtgate.Config.ToJSONString()), 0644) +} + +// WaitForConfig waits for the expectedConfig to be present in the vtgate configuration. +func (vtgate *VtgateProcess) WaitForConfig(expectedConfig string) error { + timeout := time.After(30 * time.Second) + var response string + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for api to work. 
Last response - %s", response) + default: + _, response, _ = vtgate.MakeAPICall("/debug/config") + if strings.Contains(response, expectedConfig) { + return nil + } + time.Sleep(1 * time.Second) + } + } +} + +// MakeAPICall makes an API call on the given endpoint of the vtgate process +func (vtgate *VtgateProcess) MakeAPICall(endpoint string) (status int, response string, err error) { + url := fmt.Sprintf("http://localhost:%d/%s", vtgate.Port, endpoint) + resp, err := http.Get(url) + if err != nil { + if resp != nil { + status = resp.StatusCode + } + return status, "", err + } + defer func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }() + + respByte, _ := io.ReadAll(resp.Body) + return resp.StatusCode, string(respByte), err +} + +// MakeAPICallRetry makes an API call, retrying until it succeeds or times out +func (vtgate *VtgateProcess) MakeAPICallRetry(t *testing.T, url string) { + t.Helper() + timeout := time.After(10 * time.Second) + for { + select { + case <-timeout: + t.Fatal("timed out waiting for api to work") + return + default: + status, _, err := vtgate.MakeAPICall(url) + if err == nil && status == 200 { + return + } + time.Sleep(1 * time.Second) + } + } +} + const defaultVtGatePlannerVersion = planbuilder.Gen4 // Setup starts Vtgate process with required arguments @@ -74,6 +152,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { "--topo_implementation", vtgate.CommonArg.TopoImplementation, "--topo_global_server_address", vtgate.CommonArg.TopoGlobalAddress, "--topo_global_root", vtgate.CommonArg.TopoGlobalRoot, + "--config-file", vtgate.ConfigFile, "--log_dir", vtgate.LogDir, "--log_queries_to_file", vtgate.FileToLogQueries, "--port", fmt.Sprintf("%d", vtgate.Port), @@ -98,6 +177,19 @@ func (vtgate *VtgateProcess) Setup() (err error) { break } } + configFile, err := os.Create(vtgate.ConfigFile) + if err != nil { + log.Errorf("cannot create config file for vtgate: %v", err) + return err + } + _, err = configFile.WriteString(vtgate.Config.ToJSONString()) + if err != nil { + return err + } + err = configFile.Close() + if err != nil { + return err + } if !msvflag { version, err := mysqlctl.GetVersionString() if err != nil { @@ -287,6 +379,7 @@ func VtgateProcessInstance( Name: "vtgate", Binary: "vtgate", FileToLogQueries: path.Join(tmpDirectory, "/vtgate_querylog.txt"), + ConfigFile: path.Join(tmpDirectory, fmt.Sprintf("vtgate-config-%d.json", port)), Directory: os.Getenv("VTDATAROOT"), ServiceMap: "grpc-tabletmanager,grpc-throttler,grpc-queryservice,grpc-updatestream,grpc-vtctl,grpc-vtgateservice", LogDir: tmpDirectory, diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index f65301b9bb4..e8a11ceed07 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -142,18 +143,18 @@ func TestDurabilityPolicyField(t *testing.T) { out, err := vtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "ks_durability", "--durability-policy=semi_sync") require.NoError(t, err, out) - checkDurabilityPolicy(t, "semi_sync") + checkDurabilityPolicy(t, policy.DurabilitySemiSync) out, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", "ks_durability", "--durability-policy=none")
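Worth pausing on the `VTGateConfiguration` plumbing introduced above: a test can now mutate the config struct, rewrite the JSON file on disk, and block until the running vtgate reports the new value on `/debug/config`. A minimal usage sketch (assuming a started `cluster.LocalProcessCluster` named `clusterInstance`, as in the twopc tests later in this diff):

```go
// Flip vtgate's transaction mode at runtime and wait for it to take effect.
clusterInstance.VtgateProcess.Config.TransactionMode = "SINGLE"
if err := clusterInstance.VtgateProcess.RewriteConfiguration(); err != nil {
	t.Fatal(err)
}
// Poll /debug/config until the running vtgate reflects the new value.
if err := clusterInstance.VtgateProcess.WaitForConfig(`"transaction_mode":"SINGLE"`); err != nil {
	t.Fatal(err)
}
```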
require.NoError(t, err, out) - checkDurabilityPolicy(t, "none") + checkDurabilityPolicy(t, policy.DurabilityNone) out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability") require.NoError(t, err, out) - checkDurabilityPolicy(t, "semi_sync") + checkDurabilityPolicy(t, policy.DurabilitySemiSync) out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) @@ -277,24 +278,24 @@ func TestDeleteKeyspace(t *testing.T) { // TODO: (ajm188) if this test gets fixed, the flags need to be updated to comply with VEP-4 as well. // tells that in zone2 after deleting shard, there is no shard #264 and in zone1 there is only 1 #269 /*func RemoveKeyspaceCell(t *testing.T) { - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace_removekscell/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace_removekscell/1") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone1-0000000100", "primary") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone1-0000000101", "primary") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone2-0000000100", "replica") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone2-0000000101", "replica") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace_removekscell/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace_removekscell/1") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone1-0000000100", "primary") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone1-0000000101", "primary") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone2-0000000100", "replica") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone2-0000000101", "replica") // Create the serving/replication entries and check that they exist, so we can later check they're deleted. 
- _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/1") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", "zone2", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", "zone1", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/1") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspace", "zone2", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspace", "zone1", "test_delete_keyspace_removekscell") // Just remove the shard from one cell (including tablets), // but leaving the global records and other cells/shards alone. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RemoveShardCell", "--recursive", "test_delete_keyspace_removekscell/0", "zone2") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RemoveShardCell", "--recursive", "test_delete_keyspace_removekscell/0", "zone2") //Check that the shard is gone from zone2. srvKeyspaceZone2 := getSrvKeyspace(t, cell2, "test_delete_keyspace_removekscell") @@ -308,42 +309,42 @@ func TestDeleteKeyspace(t *testing.T) { assert.Equal(t, len(partition.ShardReferences), 2) } - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShard", "test_delete_keyspace_removekscell/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShard", "test_delete_keyspace_removekscell/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") - err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetTablet", "zone2-0000000100") + err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetTablet", "zone2-0000000100") require.Error(t, err) - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetTablet", "zone2-0000000101") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone1", "test_delete_keyspace_removekscell/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetTablet", "zone2-0000000101") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone1", "test_delete_keyspace_removekscell/0") - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") + err = 
clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") require.Error(t, err) - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/1") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", "zone2", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/1") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspace", "zone2", "test_delete_keyspace_removekscell") // Add it back to do another test. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone2-0000000100", "replica") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("InitTablet", "--port=1234", "--keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone2-0000000100", "replica") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") // Now use RemoveKeyspaceCell to remove all shards. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RemoveKeyspaceCell", "-recursive", "test_delete_keyspace_removekscell", "zone2") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone1", "test_delete_keyspace_removekscell/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RemoveKeyspaceCell", "-recursive", "test_delete_keyspace_removekscell", "zone2") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone1", "test_delete_keyspace_removekscell/0") - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/0") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/1") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "zone2", "test_delete_keyspace_removekscell/1") require.Error(t, err) // Clean up - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "-recursive", "test_delete_keyspace_removekscell") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "-recursive", "test_delete_keyspace_removekscell") } */ func TestShardCountForAllKeyspaces(t *testing.T) { diff --git a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go index ee8141860f4..d34b63b833b 100644 --- a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go +++ b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go @@ -244,7 +244,7 @@ func 
TestOnlineDDLFlow(t *testing.T) { select { case <-ticker.C: case <-workloadCtx.Done(): - t.Logf("Terminating routine throttler check") + fmt.Println("Terminating routine throttler check") return } } @@ -258,8 +258,8 @@ func TestOnlineDDLFlow(t *testing.T) { wg.Add(1) go func() { defer cancel() - defer t.Logf("Terminating workload") defer wg.Done() + defer fmt.Println("Terminating workload") runMultipleConnections(workloadCtx, t) }() }) diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 0d2eb8935d2..37855a47df6 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -28,10 +28,11 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) func TestTrivialERS(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -55,7 +56,7 @@ func TestTrivialERS(t *testing.T) { } func TestReparentIgnoreReplicas(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -96,7 +97,7 @@ func TestReparentIgnoreReplicas(t *testing.T) { } func TestReparentDownPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -131,7 +132,7 @@ func TestReparentDownPrimary(t *testing.T) { } func TestReparentNoChoiceDownPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -166,7 +167,7 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) { func TestSemiSyncSetupCorrectly(t *testing.T) { t.Run("semi-sync enabled", func(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -193,7 +194,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { }) t.Run("semi-sync disabled", func(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "none") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -222,7 +223,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) { // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary func TestERSPromoteRdonly(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var 
err error @@ -249,7 +250,7 @@ func TestERSPromoteRdonly(t *testing.T) { // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set func TestERSPreventCrossCellPromotion(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -271,7 +272,7 @@ func TestERSPreventCrossCellPromotion(t *testing.T) { // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have // caught up to it by pulling transactions from it func TestPullFromRdonly(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error @@ -342,7 +343,7 @@ func TestPullFromRdonly(t *testing.T) { // replicas which do not have any replication status and also succeeds if the io thread // is stopped on the primary elect. func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -441,7 +442,7 @@ func TestERSForInitialization(t *testing.T) { } func TestRecoverWithMultipleFailures(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -468,7 +469,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) { // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs func TestERSFailFast(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -507,7 +508,7 @@ func TestERSFailFast(t *testing.T) { // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped. // If there are more than 1, we also fail. 
func TestReplicationStopped(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index a041ca04c68..fc5db965847 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) // TestRecoverWithMultipleVttabletFailures tests that ERS succeeds with the default values @@ -36,7 +37,7 @@ import ( // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag. func TestRecoverWithMultipleVttabletFailures(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -67,7 +68,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { // and ERS succeeds. func TestSingleReplicaERS(t *testing.T) { // Set up a cluster with none durability policy - clusterInstance := utils.SetupReparentCluster(t, "none") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets // Confirm that the replication is setup correctly in the beginning. @@ -102,7 +103,7 @@ func TestSingleReplicaERS(t *testing.T) { // TestTabletRestart tests that a running tablet can be restarted and everything is still fine func TestTabletRestart(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -114,7 +115,7 @@ func TestTabletRestart(t *testing.T) { // Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded. func TestChangeTypeWithoutSemiSync(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "none") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityNone) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -159,7 +160,7 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { // TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a // write that happens when PromoteReplica is called. 
func TestERSWithWriteInPromoteReplica(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -176,7 +177,7 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) { } func TestBufferingWithMultipleDisruptions(t *testing.T) { - clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupShardedReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) // Stop all VTOrc instances, so that they don't interfere with the test. diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index 94e37d715f4..7b750dc3f16 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -33,10 +33,11 @@ import ( "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -47,7 +48,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { } func TestReparentCrossCell(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -60,7 +61,7 @@ func TestReparentCrossCell(t *testing.T) { } func TestReparentGraceful(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -82,7 +83,7 @@ func TestReparentGraceful(t *testing.T) { // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet func TestPRSWithDrainedLaggingTablet(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -108,7 +109,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { } func TestReparentReplicaOffline(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -125,7 +126,7 @@ func TestReparentReplicaOffline(t *testing.T) { } func TestReparentAvoid(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets 
utils.DeleteTablet(t, clusterInstance, tablets[2]) @@ -172,13 +173,13 @@ func TestReparentAvoid(t *testing.T) { } func TestReparentFromOutside(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -277,7 +278,7 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -323,7 +324,7 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -389,7 +390,7 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "cross_cell") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilityCrossCell) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -428,7 +429,7 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. 
func TestFullStatus(t *testing.T) { - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go index df9bf192e65..804a1645f19 100644 --- a/go/test/endtoend/reparent/semisync/semi_sync_test.go +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) func TestSemiSyncUpgradeDowngrade(t *testing.T) { @@ -33,7 +34,7 @@ func TestSemiSyncUpgradeDowngrade(t *testing.T) { if ver != 21 { t.Skip("We only want to run this test for v21 release") } - clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + clusterInstance := utils.SetupReparentCluster(t, policy.DurabilitySemiSync) defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 2a51262557b..5fa06c9be4c 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -32,6 +32,7 @@ import ( "github.com/stretchr/testify/require" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vttablet/tabletconn" "vitess.io/vitess/go/mysql" @@ -71,7 +72,7 @@ func SetupReparentCluster(t *testing.T, durability string) *cluster.LocalProcess // SetupRangeBasedCluster sets up the range based cluster func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalProcessCluster { - return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, "semi_sync") + return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, policy.DurabilitySemiSync) } // SetupShardedReparentCluster is used to setup a sharded cluster for testing @@ -677,7 +678,7 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces assert.Len(t, result[cell1].Nodes, 2) } } else { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, KeyspaceShard) + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, KeyspaceShard) require.Nil(t, err, "error should be Nil") if !downPrimary { assertNodeCount(t, result, int(3)) diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 3e5f2b3add7..ba7884de40f 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -153,8 +153,8 @@ func TestShardedKeyspace(t *testing.T) { err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateSchemaShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ValidateSchemaKeyspace", keyspaceName) - require.Error(t, err) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ValidateSchemaKeyspace", keyspaceName) + require.NoError(t, err) // We should assert that there 
is a schema difference and that both the shard primaries are involved in it. // However, we cannot assert in which order the two primaries will occur since the underlying function does not guarantee that // We could have an output here like `schemas differ ... shard1Primary ... differs from: shard2Primary ...` or `schemas differ ... shard2Primary ... differs from: shard1Primary ...` diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go index a426355e01c..162388c83e0 100644 --- a/go/test/endtoend/throttler/util.go +++ b/go/test/endtoend/throttler/util.go @@ -98,7 +98,7 @@ func GetThrottlerStatusRaw(vtctldProcess *cluster.VtctldClientProcess, tablet *c return result, err } -// UpdateThrottlerTopoConfig runs vtctlclient UpdateThrottlerConfig. +// UpdateThrottlerTopoConfig runs vtctldclient UpdateThrottlerConfig. // This retries the command until it succeeds or times out as the // SrvKeyspace record may not yet exist for a newly created // Keyspace that is still initializing before it becomes serving. @@ -218,7 +218,7 @@ func GetThrottlerStatus(vtctldProcess *cluster.VtctldClientProcess, tablet *clus return resp.Status, err } -// UpdateThrottlerTopoConfig runs vtctlclient UpdateThrottlerConfig. +// UpdateThrottlerTopoConfig runs vtctldclient UpdateThrottlerConfig. // This retries the command until it succeeds or times out as the // SrvKeyspace record may not yet exist for a newly created // Keyspace that is still initializing before it becomes serving. @@ -272,7 +272,7 @@ func WaitForSrvKeyspace(clusterInstance *cluster.LocalProcessCluster, cell, keys } } -// throttleAppRaw runs vtctlclient UpdateThrottlerConfig with --throttle-app flags +// throttleAppRaw runs vtctldclient UpdateThrottlerConfig with --throttle-app flags // This retries the command until it succeeds or times out as the // SrvKeyspace record may not yet exist for a newly created // Keyspace that is still initializing before it becomes serving. 
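A note on the recurring `policy.Durability*` substitutions in the reparent tests above and the transaction tests that follow: the bare durability-policy strings are replaced by exported constants from the new `reparentutil/policy` package. Judging from the one-to-one replacements in this diff, the constants presumably reduce to the old string values, roughly as in this sketch (the authoritative definitions live in `vitess.io/vitess/go/vt/vtctl/reparentutil/policy`):

```go
// Sketch of the constants implied by the substitutions in this diff;
// each one stands in for the literal it replaces.
package policy

const (
	DurabilityNone      = "none"       // replaces "none"
	DurabilitySemiSync  = "semi_sync"  // replaces "semi_sync"
	DurabilityCrossCell = "cross_cell" // replaces "cross_cell"
)
```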
diff --git a/go/test/endtoend/transaction/benchmark/bench_test.go b/go/test/endtoend/transaction/benchmark/bench_test.go index a42c9bca9c1..553919f893e 100644 --- a/go/test/endtoend/transaction/benchmark/bench_test.go +++ b/go/test/endtoend/transaction/benchmark/bench_test.go @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -65,7 +66,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 1, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go index 4d168fbdde0..3516bdefe05 100644 --- a/go/test/endtoend/transaction/twopc/fuzz/main_test.go +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -78,7 +79,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -89,7 +90,7 @@ func TestMain(m *testing.M) { Name: unshardedKeyspaceName, SchemaSQL: "", VSchema: "{}", - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go index 58fe45547a5..7a2f7e8676e 100644 --- a/go/test/endtoend/transaction/twopc/main_test.go +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -32,6 +32,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -77,7 +78,6 @@ func TestMain(m *testing.M) { // Set extra args for twopc clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, - "--transaction_mode", "TWOPC", "--grpc_use_effective_callerid", ) clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, @@ -93,7 +93,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -103,6 +103,13 @@ func TestMain(m *testing.M) { if err := clusterInstance.StartVtgate(); err != nil { return 1 } + clusterInstance.VtgateProcess.Config.TransactionMode = "TWOPC" + if err := clusterInstance.VtgateProcess.RewriteConfiguration(); err != nil { + return 1 + } + if err := clusterInstance.VtgateProcess.WaitForConfig(`"transaction_mode":"TWOPC"`); err != nil { + return 1 + } vtParams = clusterInstance.GetVTParams(keyspaceName) vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, 
clusterInstance.VtgateGrpcPort) diff --git a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go index 61a43017ef9..0018f5d45d8 100644 --- a/go/test/endtoend/transaction/twopc/metric/main_test.go +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -78,7 +79,7 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, SidecarDBName: sidecarDBName, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go index 4da4f86bdff..977fa3f6fd6 100644 --- a/go/test/endtoend/transaction/twopc/stress/main_test.go +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -80,7 +81,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { return 1 @@ -91,7 +92,7 @@ func TestMain(m *testing.M) { Name: unshardedKeyspaceName, SchemaSQL: "", VSchema: "{}", - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { return 1 diff --git a/go/test/endtoend/transaction/twopc/twopc_test.go b/go/test/endtoend/transaction/twopc/twopc_test.go index a760cfb24b3..b7f7c11fba9 100644 --- a/go/test/endtoend/transaction/twopc/twopc_test.go +++ b/go/test/endtoend/transaction/twopc/twopc_test.go @@ -44,6 +44,38 @@ import ( "vitess.io/vitess/go/vt/vttablet/grpctmclient" ) +// TestDynamicConfig tests that transaction mode is dynamically configurable. +func TestDynamicConfig(t *testing.T) { + conn, closer := start(t) + defer closer() + defer conn.Close() + + // Ensure that initially running a distributed transaction is possible. + utils.Exec(t, conn, "begin") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(4, 4)") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(6, 4)") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(9, 4)") + utils.Exec(t, conn, "commit") + + clusterInstance.VtgateProcess.Config.TransactionMode = "SINGLE" + defer func() { + clusterInstance.VtgateProcess.Config.TransactionMode = "TWOPC" + err := clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + }() + err := clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + err = clusterInstance.VtgateProcess.WaitForConfig(`"transaction_mode":"SINGLE"`) + require.NoError(t, err) + + // After the config changes verify running a distributed transaction fails. 
+ utils.Exec(t, conn, "begin") + utils.Exec(t, conn, "insert into twopc_t1(id, col) values(20, 4)") + _, err = utils.ExecAllowError(t, conn, "insert into twopc_t1(id, col) values(22, 4)") + require.ErrorContains(t, err, "multi-db transaction attempted") + utils.Exec(t, conn, "rollback") +} + // TestDTCommit tests distributed transaction commit for insert, update and delete operations // It verifies the binlog events for the same with transaction state changes and redo statements. func TestDTCommit(t *testing.T) { diff --git a/go/test/endtoend/transaction/tx_test.go b/go/test/endtoend/transaction/tx_test.go index 89531952b13..fd162cb3d41 100644 --- a/go/test/endtoend/transaction/tx_test.go +++ b/go/test/endtoend/transaction/tx_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -69,7 +70,7 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: SchemaSQL, VSchema: VSchema, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1, err diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go index dd9614e79fa..b2e1eca03e9 100644 --- a/go/test/endtoend/utils/cmp.go +++ b/go/test/endtoend/utils/cmp.go @@ -215,6 +215,18 @@ func (mcmp *MySQLCompare) Exec(query string) *sqltypes.Result { return vtQr } +// ExecVitessAndMySQLDifferentQueries executes Vitess and MySQL with the queries provided. +func (mcmp *MySQLCompare) ExecVitessAndMySQLDifferentQueries(vtQ, mQ string) *sqltypes.Result { + mcmp.t.Helper() + vtQr, err := mcmp.VtConn.ExecuteFetch(vtQ, 1000, true) + require.NoError(mcmp.t, err, "[Vitess Error] for query: "+vtQ) + + mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(mQ, 1000, true) + require.NoError(mcmp.t, err, "[MySQL Error] for query: "+mQ) + compareVitessAndMySQLResults(mcmp.t, vtQ, mcmp.VtConn, vtQr, mysqlQr, CompareOptions{}) + return vtQr +} + // ExecAssert is the same as Exec, but it only does assertions, it won't FailNow func (mcmp *MySQLCompare) ExecAssert(query string) *sqltypes.Result { mcmp.t.Helper() diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 4b4bcfecc35..3e7dd08f31c 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -29,7 +29,7 @@ import ( // 1. Composite or multi-column primary keys // 2. PKs that contain an ENUM column // 3. That we properly handle tables with auto_increment columns (which are stripped by default when -// moving the table to a sharded keyspace with vtctldclient and left in place when using vtctlclient) +// moving the table to a sharded keyspace) // // The Lead and Lead-1 tables also allows us to test several things: // 1. 
Mixed case identifiers diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go index e17247ab46b..d6716bcbf2d 100644 --- a/go/test/endtoend/vreplication/fk_ext_test.go +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -151,8 +151,7 @@ func TestFKExt(t *testing.T) { } sqls := strings.Split(FKExtSourceSchema, "\n") for _, sql := range sqls { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplySchema", "--", - "--ddl_strategy=direct", "--sql", sql, keyspaceName) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy=direct", "--sql", sql, keyspaceName) require.NoErrorf(t, err, output) } doReshard(t, fkextConfig.target2KeyspaceName, "reshard2to3", "-80,80-", threeShards, tablets) @@ -165,8 +164,7 @@ func TestFKExt(t *testing.T) { tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID)].Vttablet sqls := strings.Split(FKExtSourceSchema, "\n") for _, sql := range sqls { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplySchema", "--", - "--ddl_strategy=direct", "--sql", sql, keyspaceName) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy=direct", "--sql", sql, keyspaceName) require.NoErrorf(t, err, output) } doReshard(t, fkextConfig.target2KeyspaceName, "reshard3to1", threeShards, "0", tablets) @@ -254,7 +252,7 @@ func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards for _, targetTab := range targetTabs { catchup(t, targetTab, workflowName, "Reshard") } - vdiff(t, keyspace, workflowName, fkextConfig.cell, false, true, nil) + vdiff(t, keyspace, workflowName, fkextConfig.cell, nil) rs.SwitchReadsAndWrites() //if lg.WaitForAdditionalRows(100) != nil { // t.Fatal("WaitForAdditionalRows failed") @@ -263,7 +261,7 @@ func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards if compareRowCounts(t, keyspace, strings.Split(sourceShards, ","), strings.Split(targetShards, ",")) != nil { t.Fatal("Row counts do not match") } - vdiff(t, keyspace, workflowName+"_reverse", fkextConfig.cell, true, false, nil) + vdiff(t, keyspace, workflowName+"_reverse", fkextConfig.cell, nil) rs.ReverseReadsAndWrites() //if lg.WaitForAdditionalRows(100) != nil { @@ -273,7 +271,7 @@ func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards if compareRowCounts(t, keyspace, strings.Split(targetShards, ","), strings.Split(sourceShards, ",")) != nil { t.Fatal("Row counts do not match") } - vdiff(t, keyspace, workflowName, fkextConfig.cell, false, true, nil) + vdiff(t, keyspace, workflowName, fkextConfig.cell, nil) lg.Stop() rs.SwitchReadsAndWrites() @@ -313,12 +311,10 @@ const fkExtMaterializeSpec = ` func materializeTables(t *testing.T) { wfName := "mat" - err := vc.VtctlClient.ExecuteCommand("ApplySchema", "--", "--ddl_strategy=direct", - "--sql", FKExtMaterializeSchema, fkextConfig.target1KeyspaceName) + err := vc.VtctldClient.ExecuteCommand("ApplySchema", "--ddl-strategy=direct", "--sql", FKExtMaterializeSchema, fkextConfig.target1KeyspaceName) require.NoError(t, err, fmt.Sprintf("ApplySchema Error: %s", err)) materializeSpec := fmt.Sprintf(fkExtMaterializeSpec, "mat", fkextConfig.target2KeyspaceName, fkextConfig.target1KeyspaceName) - err = vc.VtctlClient.ExecuteCommand("Materialize", materializeSpec) - require.NoError(t, err, "Materialize") + materialize(t, materializeSpec) tab := vc.getPrimaryTablet(t, fkextConfig.target1KeyspaceName, "0") 
catchup(t, tab, wfName, "Materialize") validateMaterializeRowCounts(t) @@ -363,7 +359,7 @@ func doMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, ta for _, targetTab := range targetTabs { catchup(t, targetTab, workflowName, "MoveTables") } - vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, false, true, nil) + vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, nil) lg.Stop() lg.SetDBStrategy("vtgate", targetKeyspace) if lg.Start() != nil { @@ -377,7 +373,7 @@ func doMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, ta } waitForLowLag(t, sourceKeyspace, workflowName+"_reverse") - vdiff(t, sourceKeyspace, workflowName+"_reverse", fkextConfig.cell, false, true, nil) + vdiff(t, sourceKeyspace, workflowName+"_reverse", fkextConfig.cell, nil) if lg.WaitForAdditionalRows(100) != nil { t.Fatal("WaitForAdditionalRows failed") } @@ -388,7 +384,7 @@ func doMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, ta } waitForLowLag(t, targetKeyspace, workflowName) time.Sleep(5 * time.Second) - vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, false, true, nil) + vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, nil) lg.Stop() mt.SwitchReadsAndWrites() mt.Complete() diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index f977d5a74cd..15664be51d9 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -102,11 +102,11 @@ func TestFKWorkflow(t *testing.T) { targetTab := targetKs.Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet require.NotNil(t, targetTab) catchup(t, targetTab, workflowName, "MoveTables") - vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + vdiff(t, targetKeyspace, workflowName, cellName, nil) if withLoad { ls.waitForAdditionalRows(200) } - vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + vdiff(t, targetKeyspace, workflowName, cellName, nil) if withLoad { cancel() <-ch diff --git a/go/test/endtoend/vreplication/global_routing_test.go b/go/test/endtoend/vreplication/global_routing_test.go new file mode 100644 index 00000000000..667c6352e2e --- /dev/null +++ b/go/test/endtoend/vreplication/global_routing_test.go @@ -0,0 +1,296 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "bytes" + "fmt" + "strings" + "testing" + "text/template" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + vttablet "vitess.io/vitess/go/vt/vttablet/common" +) + +type tgrTestConfig struct { + ksU1, ksU2, ksS1 string + ksU1Tables, ksU2Tables, ksS1Tables []string +} + +var grTestConfig = tgrTestConfig{ + ksU1: "unsharded1", + ksU2: "unsharded2", + ksS1: "sharded1", + ksU1Tables: []string{"t1", "t2", "t3"}, + ksU2Tables: []string{"t2", "t4", "t5"}, + ksS1Tables: []string{"t2", "t4", "t6"}, +} + +type grTestExpectations struct { + postKsU1, postKsU2, postKsS1 func(t *testing.T) +} + +// Scope helpers to this test file so we don't pollute the global namespace. +type grHelpers struct { + t *testing.T +} + +func (h *grHelpers) getSchema(tables []string) string { + var createSQL string + for _, table := range tables { + createSQL += fmt.Sprintf("CREATE TABLE %s (id int primary key, val varchar(32)) ENGINE=InnoDB;\n", table) + } + return createSQL +} + +func (h *grHelpers) getShardedVSchema(tables []string) string { + const vSchemaTmpl = `{ + "sharded": true, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + {{- range $i, $table := .Tables}} + {{- if gt $i 0}},{{end}} + "{{ $table }}": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + } + {{- end}} + } +} +` + type VSchemaData struct { + Tables []string + } + tmpl, err := template.New("vschema").Parse(vSchemaTmpl) + require.NoError(h.t, err) + var buf bytes.Buffer + err = tmpl.Execute(&buf, VSchemaData{tables}) + require.NoError(h.t, err) + return buf.String() +} + +func (h *grHelpers) insertData(t *testing.T, keyspace string, table string, id int, val string) { + vtgateConn, cancel := getVTGateConn() + defer cancel() + _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s.%s(id, val) values(%d, '%s')", + keyspace, table, id, val), 1, false) + require.NoError(t, err) +} + +// There is a race between when a table is created and it is updated in the global table cache in vtgate. +// This function waits for the table to be available in vtgate before proceeding. +func (h *grHelpers) waitForTableAvailability(t *testing.T, vtgateConn *mysql.Conn, table string) { + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("select * from %s", table), 1, false) + if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("table %s not found", table)) { + return + } + select { + case <-timer.C: + require.FailNow(t, "timed out waiting for table availability for %s", table) + default: + time.Sleep(defaultTick) + } + } +} + +// Check for the expected global routing behavior for the given tables. Expected logic is implemented in the callback. 
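+// A minimal illustration of the callback contract (a sketch, not part of the
+// real test flow; the actual assertions live in isGlobal and isAmbiguous below):
+//
+//	h.checkForTable(t, []string{"t1"}, func(rs *sqltypes.Result, err error) {
+//		require.NoError(t, err) // t1 must resolve without a keyspace qualifier
+//	})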
+func (h *grHelpers) checkForTable( + t *testing.T, + tables []string, + queryCallback func(rs *sqltypes.Result, err error), +) { + vtgateConn, cancel := getVTGateConn() + defer cancel() + + for _, table := range tables { + for _, target := range []string{"", "@primary"} { + _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", target), 1, false) + require.NoError(t, err) + h.waitForTableAvailability(t, vtgateConn, table) + rs, err := vtgateConn.ExecuteFetch(fmt.Sprintf("select * from %s", table), 1, false) + queryCallback(rs, err) + } + } +} + +func (h *grHelpers) isGlobal(t *testing.T, tables []string, expectedVal string) bool { + asExpected := true + + h.checkForTable(t, tables, func(rs *sqltypes.Result, err error) { + require.NoError(t, err) + gotVal := rs.Rows[0][1].ToString() + if gotVal != expectedVal { + asExpected = false + } + }) + + return asExpected +} + +func (h *grHelpers) isAmbiguous(t *testing.T, tables []string) bool { + asExpected := true + + h.checkForTable(t, tables, func(rs *sqltypes.Result, err error) { + if err == nil || !strings.Contains(err.Error(), "ambiguous") { + asExpected = false + } + }) + + return asExpected +} + +// getExpectations returns a map of expectations for global routing tests. The key is a boolean indicating whether +// the unsharded keyspace has a vschema. The value is a struct containing callbacks for verifying the global routing +// behavior after each keyspace is added. +func (h *grHelpers) getExpectations() *map[bool]*grTestExpectations { + var exp = make(map[bool]*grTestExpectations) + exp[false] = &grTestExpectations{ + postKsU1: func(t *testing.T) { + require.True(t, h.isGlobal(t, []string{"t1", "t2", "t3"}, grTestConfig.ksU1)) + }, + postKsU2: func(t *testing.T) { + require.True(t, h.isGlobal(t, []string{"t1", "t3"}, grTestConfig.ksU1)) + require.True(t, h.isGlobal(t, []string{"t4", "t5"}, grTestConfig.ksU2)) + require.True(t, h.isAmbiguous(t, []string{"t2"})) + }, + postKsS1: func(t *testing.T) { + require.True(t, h.isGlobal(t, []string{"t2", "t4"}, grTestConfig.ksS1)) + require.True(t, h.isGlobal(t, []string{"t1", "t3"}, grTestConfig.ksU1)) + require.True(t, h.isGlobal(t, []string{"t5"}, grTestConfig.ksU2)) + require.True(t, h.isGlobal(t, []string{"t6"}, grTestConfig.ksS1)) + }, + } + exp[true] = &grTestExpectations{ + postKsU1: func(t *testing.T) { + require.True(t, h.isGlobal(t, []string{"t1", "t2", "t3"}, grTestConfig.ksU1)) + }, + postKsU2: func(t *testing.T) { + require.True(t, h.isGlobal(t, []string{"t1", "t3"}, grTestConfig.ksU1)) + require.True(t, h.isGlobal(t, []string{"t4", "t5"}, grTestConfig.ksU2)) + require.True(t, h.isAmbiguous(t, []string{"t2"})) + }, + postKsS1: func(t *testing.T) { + require.True(t, h.isAmbiguous(t, []string{"t2", "t4"})) + require.True(t, h.isGlobal(t, []string{"t1", "t3"}, grTestConfig.ksU1)) + require.True(t, h.isGlobal(t, []string{"t5"}, grTestConfig.ksU2)) + }, + } + return &exp +} + +func (h *grHelpers) getUnshardedVschema(unshardedHasVSchema bool, tables []string) string { + if !unshardedHasVSchema { + return "" + } + vschema := `{"tables": {` + for i, table := range tables { + if i != 0 { + vschema += `,` + } + vschema += fmt.Sprintf(`"%s": {}`, table) + } + vschema += `}}` + return vschema +} + +func (h *grHelpers) rebuildGraphs(t *testing.T, keyspaces []string) { + var err error + for _, ks := range keyspaces { + err = vc.VtctldClient.ExecuteCommand("RebuildKeyspaceGraph", ks) + require.NoError(t, err) + } + require.NoError(t, err) + err = 
vc.VtctldClient.ExecuteCommand("RebuildVSchemaGraph") + require.NoError(t, err) +} + +// TestGlobalRouting tests global routing for unsharded and sharded keyspaces by setting up keyspaces +// with different table configurations and verifying that the tables are globally routed +// by querying via vtgate. +func TestGlobalRouting(t *testing.T) { + h := grHelpers{t} + exp := *h.getExpectations() + for unshardedHasVSchema, funcs := range exp { + require.NotNil(t, funcs) + testGlobalRouting(t, unshardedHasVSchema, funcs) + } +} + +func testGlobalRouting(t *testing.T, unshardedHasVSchema bool, funcs *grTestExpectations) { + h := grHelpers{t: t} + setSidecarDBName("_vt") + vttablet.InitVReplicationConfigDefaults() + + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + zone1 := vc.Cells["zone1"] + config := grTestConfig + vc.AddKeyspace(t, []*Cell{zone1}, config.ksU1, "0", h.getUnshardedVschema(unshardedHasVSchema, config.ksU1Tables), + h.getSchema(config.ksU1Tables), 1, 0, 100, nil) + verifyClusterHealth(t, vc) + for _, table := range config.ksU1Tables { + h.insertData(t, config.ksU1, table, 1, config.ksU1) + vtgateConn, cancel := getVTGateConn() + waitForRowCount(t, vtgateConn, config.ksU1+"@replica", table, 1) + cancel() + } + keyspaces := []string{config.ksU1} + h.rebuildGraphs(t, keyspaces) + funcs.postKsU1(t) + + vc.AddKeyspace(t, []*Cell{zone1}, config.ksU2, "0", h.getUnshardedVschema(unshardedHasVSchema, config.ksU2Tables), + h.getSchema(config.ksU2Tables), 1, 0, 200, nil) + verifyClusterHealth(t, vc) + for _, table := range config.ksU2Tables { + h.insertData(t, config.ksU2, table, 1, config.ksU2) + vtgateConn, cancel := getVTGateConn() + waitForRowCount(t, vtgateConn, config.ksU2+"@replica", table, 1) + cancel() + } + keyspaces = append(keyspaces, config.ksU2) + h.rebuildGraphs(t, keyspaces) + funcs.postKsU2(t) + + vc.AddKeyspace(t, []*Cell{zone1}, config.ksS1, "-80,80-", h.getShardedVSchema(config.ksS1Tables), h.getSchema(config.ksS1Tables), + 1, 0, 300, nil) + verifyClusterHealth(t, vc) + for _, table := range config.ksS1Tables { + h.insertData(t, config.ksS1, table, 1, config.ksS1) + vtgateConn, cancel := getVTGateConn() + waitForRowCount(t, vtgateConn, config.ksS1+"@replica", table, 1) + cancel() + } + keyspaces = append(keyspaces, config.ksS1) + h.rebuildGraphs(t, keyspaces) + funcs.postKsS1(t) +} diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index 3795b6f52d5..0102b9b5e2d 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -345,55 +345,52 @@ func assertQueryDoesNotExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet * } func waitForWorkflowToBeCreated(t *testing.T, vc *VitessCluster, ksWorkflow string) { + keyspace, workflow := parseKeyspaceWorkflow(t, ksWorkflow) require.NoError(t, waitForCondition("workflow to be created", func() bool { - _, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show") - return err == nil + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", keyspace, "show", "--workflow", workflow, "--compact", "--include-logs=false") + return err == nil && !isEmptyWorkflowShowOutput(output) }, defaultTimeout)) } // waitForWorkflowState waits for all of the given workflow's // streams to reach the provided state. You can pass optional // key value pairs of the form "key==value" to also wait for -// additional stream sub-state such as "Message==for vdiff". 
+// additional stream sub-state such as "message==for vdiff". // Invalid checks are ignored. func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wantState string, fieldEqualityChecks ...string) { + keyspace, workflow := parseKeyspaceWorkflow(t, ksWorkflow) done := false timer := time.NewTimer(workflowStateTimeout) + defer timer.Stop() log.Infof("Waiting for workflow %q to fully reach %q state", ksWorkflow, wantState) for { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", keyspace, "show", "--workflow", workflow, "--compact", "--include-logs=false") require.NoError(t, err, output) done = true state := "" - result := gjson.Get(output, "ShardStatuses") - result.ForEach(func(tabletId, tabletStreams gjson.Result) bool { // for each participating tablet - tabletStreams.ForEach(func(streamId, streamInfos gjson.Result) bool { // for each stream - if streamId.String() == "PrimaryReplicationStatuses" { - streamInfos.ForEach(func(attributeKey, attributeValue gjson.Result) bool { // for each attribute in the stream - // we need to wait for all streams to have the desired state - state = attributeValue.Get("State").String() - if state == wantState { - for i := 0; i < len(fieldEqualityChecks); i++ { - if kvparts := strings.Split(fieldEqualityChecks[i], "=="); len(kvparts) == 2 { - key := kvparts[0] - val := kvparts[1] - res := attributeValue.Get(key).String() - if !strings.EqualFold(res, val) { - done = false - } - } - } - if wantState == binlogdatapb.VReplicationWorkflowState_Running.String() && attributeValue.Get("Pos").String() == "" { - done = false - } - } else { + streams := gjson.Get(output, "workflows.0.shard_streams.*.streams") + streams.ForEach(func(streamId, stream gjson.Result) bool { // For each stream + info := stream.Map() + // We need to wait for all streams to have the desired state. + state = info["state"].String() + if state == wantState { + for i := 0; i < len(fieldEqualityChecks); i++ { + if kvparts := strings.Split(fieldEqualityChecks[i], "=="); len(kvparts) == 2 { + key := kvparts[0] + val := kvparts[1] + res := info[key].String() + if !strings.EqualFold(res, val) { done = false } - return true - }) + } } - return true - }) + if wantState == binlogdatapb.VReplicationWorkflowState_Running.String() && + (info["position"].Exists() && info["position"].String() == "") { + done = false + } + } else { + done = false + } return true }) if done { @@ -421,7 +418,8 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa // were re-added by the time the workflow hits the running phase. // For a Reshard workflow, where no tables are specified, pass // an empty string for the tables and all tables in the target -// keyspace will be checked. +// keyspace will be checked. It checks for the expected state until +// the timeout is reached. func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletProcess, ksName string, tables string) { require.NotNil(t, tablets) require.NotNil(t, tablets[0]) @@ -438,36 +436,52 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro tableArr = append(tableArr, row[0].ToString()) } } - for _, tablet := range tablets { - // Be sure that the schema is up to date. 
- err := vc.VtctldClient.ExecuteCommand("ReloadSchema", topoproto.TabletAliasString(&topodatapb.TabletAlias{ - Cell: tablet.Cell, - Uid: uint32(tablet.TabletUID), - })) - require.NoError(t, err) - for _, table := range tableArr { - if schema.IsInternalOperationTableName(table) { - continue - } - table := strings.TrimSpace(table) - secondaryKeys := 0 - res, err := tablet.QueryTablet(fmt.Sprintf("show create table %s", sqlescape.EscapeID(table)), ksName, true) - require.NoError(t, err) - require.NotNil(t, res) - row := res.Named().Row() - tableSchema := row["Create Table"].ToString() - parsedDDL, err := sqlparser.NewTestParser().ParseStrictDDL(tableSchema) + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + tablesWithoutSecondaryKeys := make([]string, 0) + for _, tablet := range tablets { + // Be sure that the schema is up to date. + err := vc.VtctldClient.ExecuteCommand("ReloadSchema", topoproto.TabletAliasString(&topodatapb.TabletAlias{ + Cell: tablet.Cell, + Uid: uint32(tablet.TabletUID), + })) require.NoError(t, err) - createTable, ok := parsedDDL.(*sqlparser.CreateTable) - require.True(t, ok) - require.NotNil(t, createTable) - require.NotNil(t, createTable.GetTableSpec()) - for _, index := range createTable.GetTableSpec().Indexes { - if index.Info.Type != sqlparser.IndexTypePrimary { - secondaryKeys++ + for _, table := range tableArr { + if schema.IsInternalOperationTableName(table) { + continue + } + table := strings.TrimSpace(table) + secondaryKeys := 0 + res, err := tablet.QueryTablet(fmt.Sprintf("show create table %s", sqlescape.EscapeID(table)), ksName, true) + require.NoError(t, err) + require.NotNil(t, res) + row := res.Named().Row() + tableSchema := row["Create Table"].ToString() + parsedDDL, err := sqlparser.NewTestParser().ParseStrictDDL(tableSchema) + require.NoError(t, err) + createTable, ok := parsedDDL.(*sqlparser.CreateTable) + require.True(t, ok) + require.NotNil(t, createTable) + require.NotNil(t, createTable.GetTableSpec()) + for _, index := range createTable.GetTableSpec().Indexes { + if index.Info.Type != sqlparser.IndexTypePrimary { + secondaryKeys++ + } + } + if secondaryKeys == 0 { + tablesWithoutSecondaryKeys = append(tablesWithoutSecondaryKeys, table) } } - require.Greater(t, secondaryKeys, 0, "Table %s does not have any secondary keys", table) + } + if len(tablesWithoutSecondaryKeys) == 0 { + return + } + select { + case <-timer.C: + require.FailNow(t, "The following table(s) do not have any secondary keys: %s", strings.Join(tablesWithoutSecondaryKeys, ", ")) + default: + time.Sleep(defaultTick) } } } @@ -510,7 +524,7 @@ func validateDryRunResults(t *testing.T, output string, want []string) { gotDryRun := strings.Split(output, "\n") require.True(t, len(gotDryRun) > 3) var startRow int - if strings.HasPrefix(gotDryRun[1], "Parameters:") { // vtctlclient + if strings.HasPrefix(gotDryRun[1], "Parameters:") { // vtctldclient startRow = 3 } else if strings.Contains(gotDryRun[0], "deprecated") { startRow = 4 @@ -548,7 +562,7 @@ func checkIfTableExists(t *testing.T, vc *VitessCluster, tabletAlias string, tab var err error found := false - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetSchema", "--", "--tables", table, tabletAlias); err != nil { + if output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetSchema", "--tables", table, tabletAlias); err != nil { return false, err } jsonparser.ArrayEach([]byte(output), func(value []byte, dataType jsonparser.ValueType, offset int, err error) { @@ -571,19 +585,10 @@ func 
validateTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, ta
 }
 
 func isTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, table string) (bool, error) {
-	var output string
-	var err error
-	found := false
-	if output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetShard", ksShard); err != nil {
-		require.Fail(t, "GetShard error", "%v %v", err, output)
-		return false, err
-	}
-	jsonparser.ArrayEach([]byte(output), func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
-		if string(value) == table {
-			found = true
-		}
-	}, "tablet_controls", "[0]", "denied_tables")
-	return found, nil
+	output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetShard", ksShard)
+	require.NoErrorf(t, err, "GetShard error, output: %s", output)
+	deniedTable := gjson.Get(output, fmt.Sprintf("shard.tablet_controls.0.denied_tables.#(==\"%s\")", table))
+	return deniedTable.Exists(), nil
 }
 
 // expectNumberOfStreams waits for the given number of streams to be present and
@@ -609,7 +614,7 @@ func confirmAllStreamsRunning(t *testing.T, vtgateConn *mysql.Conn, database str
 
 func printShardPositions(vc *VitessCluster, ksShards []string) {
 	for _, ksShard := range ksShards {
-		output, err := vc.VtctlClient.ExecuteCommandWithOutput("ShardReplicationPositions", ksShard)
+		output, err := vc.VtctldClient.ExecuteCommandWithOutput("ShardReplicationPositions", ksShard)
 		if err != nil {
 			fmt.Printf("Error in ShardReplicationPositions: %v, output %v", err, output)
 		} else {
@@ -621,7 +626,7 @@ func printShardPositions(vc *VitessCluster, ksShards []string) {
 func printRoutingRules(t *testing.T, vc *VitessCluster, msg string) error {
 	var output string
 	var err error
-	if output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules"); err != nil {
+	if output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact"); err != nil {
 		return err
 	}
 	fmt.Printf("Routing Rules::%s:\n%s\n", msg, output)
@@ -648,29 +653,22 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) {
 
 func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) {
 	timer := time.NewTimer(defaultTimeout)
 	defer timer.Stop()
-	ksWorkflow := fmt.Sprintf("%s.%s", targetKS, workflow)
 	for {
-		output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show")
-		require.NoError(t, err)
-		result := gjson.Get(output, "ShardStatuses")
-		result.ForEach(func(tabletId, tabletStreams gjson.Result) bool { // for each source tablet
-			tabletStreams.ForEach(func(streamId, streamInfos gjson.Result) bool { // for each stream
-				if streamId.String() == "PrimaryReplicationStatuses" {
-					streamInfos.ForEach(func(attributeKey, attributeValue gjson.Result) bool { // for each attribute in the stream
-						state := attributeValue.Get("State").String()
-						pos := attributeValue.Get("Pos").String()
-						// If we've actually copied anything then we'll have a position in the stream
-						if (state == binlogdatapb.VReplicationWorkflowState_Running.String() || state == binlogdatapb.VReplicationWorkflowState_Copying.String()) && pos != "" {
-							require.FailNowf(t, "Unexpected data copied in workflow",
-								"The MoveTables workflow %q copied data in less than %s when it should have been waiting. Show output: %s",
Show output: %s", - ksWorkflow, defaultTimeout, output) - } - return true // end attribute loop - }) - } - return true // end stream loop - }) - return true // end tablet loop + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", workflow, "--compact", "--include-logs=false") + require.NoError(t, err, output) + streams := gjson.Get(output, "workflows.0.shard_streams.*.streams") + streams.ForEach(func(streamId, stream gjson.Result) bool { // For each stream + info := stream.Map() + state := info["state"] + pos := info["position"] + // If we've actually copied anything then we'll have a position in the stream + if (state.Exists() && (state.String() == binlogdatapb.VReplicationWorkflowState_Running.String() || state.String() == binlogdatapb.VReplicationWorkflowState_Copying.String())) && + (pos.Exists() && pos.String() != "") { + require.FailNowf(t, "Unexpected data copied in workflow", + "The MoveTables workflow %q copied data in less than %s when it should have been waiting. Show output: %s", + ksWorkflow, defaultTimeout, output) + } + return true }) select { case <-timer.C: @@ -1071,3 +1069,21 @@ func validateOverrides(t *testing.T, tabs map[string]*cluster.VttabletProcess, w } } } + +func parseKeyspaceWorkflow(t *testing.T, ksWorkflow string) (string, string) { + t.Helper() + keyspace, workflow, ok := strings.Cut(ksWorkflow, ".") + require.True(t, ok, "invalid . value: %s", ksWorkflow) + return keyspace, workflow +} + +func isEmptyWorkflowShowOutput(output string) bool { + const ( + emptyJSON = `{}` + emptyNonCompactWorkflowShowResponse = `{ + "workflows": [] +}` + ) + v := strings.TrimSpace(output) + return v == emptyJSON || v == emptyNonCompactWorkflowShowResponse +} diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index c62099a5894..9434de9d356 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -61,7 +61,7 @@ const smMaterializeSpec = `{"workflow": "wf1", "source_keyspace": "ks1", "target const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2, 1, 'def'), (3, 2, 'def'), (4, 2, 'abc'), (5, 3, 'def'), (6, 3, 'abc')` // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters -func testShardedMaterialize(t *testing.T, useVtctldClient bool) { +func testShardedMaterialize(t *testing.T) { var err error vc = NewVitessCluster(t, nil) ks1 := "ks1" @@ -81,7 +81,7 @@ func testShardedMaterialize(t *testing.T, useVtctldClient bool) { verifyClusterHealth(t, vc) _, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false) require.NoError(t, err) - materialize(t, smMaterializeSpec, useVtctldClient) + materialize(t, smMaterializeSpec) tab := vc.getPrimaryTablet(t, ks2, "0") catchup(t, tab, "wf1", "Materialize") @@ -169,7 +169,7 @@ DETERMINISTIC RETURN id * length(val); ` -func testMaterialize(t *testing.T, useVtctldClient bool) { +func testMaterialize(t *testing.T) { var err error vc = NewVitessCluster(t, nil) sourceKs := "source" @@ -199,7 +199,7 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { testMaterializeWithNonExistentTable(t) - materialize(t, smMaterializeSpec2, useVtctldClient) + materialize(t, smMaterializeSpec2) catchup(t, ks2Primary, "wf1", "Materialize") // validate data after the copy phase @@ -219,21 +219,10 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { // TestMaterialize runs all the 
diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go
index c62099a5894..9434de9d356 100644
--- a/go/test/endtoend/vreplication/materialize_test.go
+++ b/go/test/endtoend/vreplication/materialize_test.go
@@ -61,7 +61,7 @@ const smMaterializeSpec = `{"workflow": "wf1", "source_keyspace": "ks1", "target
 const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2, 1, 'def'), (3, 2, 'def'), (4, 2, 'abc'), (5, 3, 'def'), (6, 3, 'abc')`
 
 // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters
-func testShardedMaterialize(t *testing.T, useVtctldClient bool) {
+func testShardedMaterialize(t *testing.T) {
 	var err error
 	vc = NewVitessCluster(t, nil)
 	ks1 := "ks1"
@@ -81,7 +81,7 @@ func testShardedMaterialize(t *testing.T) {
 	verifyClusterHealth(t, vc)
 	_, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false)
 	require.NoError(t, err)
-	materialize(t, smMaterializeSpec, useVtctldClient)
+	materialize(t, smMaterializeSpec)
 	tab := vc.getPrimaryTablet(t, ks2, "0")
 	catchup(t, tab, "wf1", "Materialize")
 
@@ -169,7 +169,7 @@ DETERMINISTIC
 RETURN id * length(val);
 `
 
-func testMaterialize(t *testing.T, useVtctldClient bool) {
+func testMaterialize(t *testing.T) {
 	var err error
 	vc = NewVitessCluster(t, nil)
 	sourceKs := "source"
@@ -199,7 +199,7 @@ func testMaterialize(t *testing.T) {
 
 	testMaterializeWithNonExistentTable(t)
 
-	materialize(t, smMaterializeSpec2, useVtctldClient)
+	materialize(t, smMaterializeSpec2)
 	catchup(t, ks2Primary, "wf1", "Materialize")
 
 	// validate data after the copy phase
@@ -219,21 +219,10 @@
 // TestMaterialize runs all the individual materialize tests defined above.
 func TestMaterialize(t *testing.T) {
 	t.Run("Materialize", func(t *testing.T) {
-		testMaterialize(t, false)
+		testMaterialize(t)
 	})
 	t.Run("ShardedMaterialize", func(t *testing.T) {
-		testShardedMaterialize(t, false)
-	})
-}
-
-// TestMaterializeVtctldClient runs all the individual materialize tests
-// defined above using vtctldclient instead of vtctlclient.
-func TestMaterializeVtctldClient(t *testing.T) {
-	t.Run("Materialize", func(t *testing.T) {
-		testMaterialize(t, true)
-	})
-	t.Run("ShardedMaterialize", func(t *testing.T) {
-		testShardedMaterialize(t, true)
+		testShardedMaterialize(t)
 	})
 }
 
@@ -315,7 +304,7 @@ func TestReferenceTableMaterialize(t *testing.T) {
 		waitForQueryResult(t, vtgateConn, "ks2:"+shard, "select id, id2 from ref2", `[[INT64(1) INT64(1)] [INT64(2) INT64(2)] [INT64(3) INT64(3)]]`)
 	}
 
-	vdiff(t, "ks2", "wf1", defaultCellName, false, true, nil)
+	vdiff(t, "ks2", "wf1", defaultCellName, nil)
 
 	queries := []string{
 		"update ks1.ref1 set val='xyz'",
@@ -332,5 +321,5 @@ func TestReferenceTableMaterialize(t *testing.T) {
 		waitForRowCount(t, vtgateConn, "ks2:"+shard, "ref1", 4)
 		waitForRowCount(t, vtgateConn, "ks2:"+shard, "ref2", 4)
 	}
-	vdiff(t, "ks2", "wf1", defaultCellName, false, true, nil)
+	vdiff(t, "ks2", "wf1", defaultCellName, nil)
 }
diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go
index 2ccb3158fd9..86b912738bc 100644
--- a/go/test/endtoend/vreplication/migrate_test.go
+++ b/go/test/endtoend/vreplication/migrate_test.go
@@ -41,140 +41,13 @@ func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) {
 	})
 }
 
-// TestVtctlMigrate runs an e2e test for importing from an external cluster using the vtctl Mount and Migrate commands.
+// TestMigrateUnsharded runs an e2e test for importing from an external cluster using the vtctldclient Mount and Migrate commands.
 // We have an anti-pattern in Vitess: vt executables look for an environment variable VTDATAROOT for certain cluster parameters
 // like the log directory when they are created. Until this test we just needed a single cluster for e2e tests.
 // However now we need to create an external Vitess cluster. For this we need a different VTDATAROOT and
 // hence the VTDATAROOT env variable gets overwritten.
// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT -func TestVtctlMigrate(t *testing.T) { - vc = NewVitessCluster(t, nil) - - oldDefaultReplicas := defaultReplicas - oldDefaultRdonly := defaultRdonly - defaultReplicas = 0 - defaultRdonly = 0 - defer func() { - defaultReplicas = oldDefaultReplicas - defaultRdonly = oldDefaultRdonly - }() - - defer vc.TearDown() - - defaultCell := vc.Cells[vc.CellNames[0]] - _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - require.NoError(t, err, "failed to create product keyspace") - vtgate := defaultCell.Vtgates[0] - require.NotNil(t, vtgate, "failed to get vtgate") - - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() - verifyClusterHealth(t, vc) - insertInitialData(t) - t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, vtgate, "product", 2) - }) - - // create external cluster - extCell := "extcell1" - extVc := NewVitessCluster(t, &clusterOptions{cells: []string{"extcell1"}, clusterConfig: externalClusterConfig}) - defer extVc.TearDown() - - extCell2 := extVc.Cells[extCell] - extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", initialExternalVSchema, initialExternalSchema, 0, 0, 1000, nil) - extVtgate := extCell2.Vtgates[0] - require.NotNil(t, extVtgate) - - verifyClusterHealth(t, extVc) - extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) - insertInitialDataIntoExternalCluster(t, extVtgateConn) - - var output, expected string - ksWorkflow := "product.e1" - - t.Run("mount external cluster", func(t *testing.T) { - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "--", "--type=vitess", "--topo_type=etcd2", - fmt.Sprintf("--topo_server=localhost:%d", extVc.ClusterConfig.topoPort), "--topo_root=/vitess/global", "ext1"); err != nil { - t.Fatalf("Mount command failed with %+v : %s\n", err, output) - } - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "--", "--type=vitess", "--list"); err != nil { - t.Fatalf("Mount command failed with %+v : %s\n", err, output) - } - expected = "ext1\n" - require.Equal(t, expected, output) - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "--", "--type=vitess", "--show", "ext1"); err != nil { - t.Fatalf("Mount command failed with %+v : %s\n", err, output) - } - expected = `{"ClusterName":"ext1","topo_config":{"topo_type":"etcd2","server":"localhost:12379","root":"/vitess/global"}}` + "\n" - require.Equal(t, expected, output) - }) - - t.Run("migrate from external cluster", func(t *testing.T) { - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "--", "--all", "--cells=extcell1", - "--source=ext1.rating", "create", ksWorkflow); err != nil { - t.Fatalf("Migrate command failed with %+v : %s\n", err, output) - } - waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) - waitForRowCount(t, vtgateConn, "product:0", "rating", 2) - waitForRowCount(t, vtgateConn, "product:0", "review", 3) - execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") - execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") - waitForRowCount(t, vtgateConn, "product:0", "rating", 3) - 
waitForRowCount(t, vtgateConn, "product:0", "review", 4) - vdiffSideBySide(t, ksWorkflow, "extcell1") - - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "complete", ksWorkflow); err != nil { - t.Fatalf("Migrate command failed with %+v : %s\n", err, output) - } - - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) - }) - t.Run("cancel migrate workflow", func(t *testing.T) { - execVtgateQuery(t, vtgateConn, "product", "drop table review,rating") - - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "--", "--all", "--auto_start=false", "--cells=extcell1", - "--source=ext1.rating", "create", ksWorkflow); err != nil { - t.Fatalf("Migrate command failed with %+v : %s\n", err, output) - } - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) - waitForRowCount(t, vtgateConn, "product:0", "rating", 0) - waitForRowCount(t, vtgateConn, "product:0", "review", 0) - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "cancel", ksWorkflow); err != nil { - t.Fatalf("Migrate command failed with %+v : %s\n", err, output) - } - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) - var found bool - found, err = checkIfTableExists(t, vc, "zone1-100", "review") - require.NoError(t, err) - require.False(t, found) - found, err = checkIfTableExists(t, vc, "zone1-100", "rating") - require.NoError(t, err) - require.False(t, found) - }) - t.Run("unmount external cluster", func(t *testing.T) { - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "--", "--type=vitess", "--unmount", "ext1"); err != nil { - t.Fatalf("Mount command failed with %+v : %s\n", err, output) - } - - if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "--", "--type=vitess", "--list"); err != nil { - t.Fatalf("Mount command failed with %+v : %s\n", err, output) - } - expected = "\n" - require.Equal(t, expected, output) - - output, err = vc.VtctlClient.ExecuteCommandWithOutput("Mount", "--", "--type=vitess", "--show", "ext1") - require.Errorf(t, err, "there is no vitess cluster named ext1") - }) -} - -// TestVtctldMigrate runs an e2e test for importing from an external cluster using the vtctld Mount and Migrate commands. -// We have an anti-pattern in Vitess: vt executables look for an environment variable VTDATAROOT for certain cluster parameters -// like the log directory when they are created. Until this test we just needed a single cluster for e2e tests. -// However now we need to create an external Vitess cluster. For this we need a different VTDATAROOT and -// hence the VTDATAROOT env variable gets overwritten. 
-// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT -func TestVtctldMigrateUnsharded(t *testing.T) { +func TestMigrateUnsharded(t *testing.T) { vc = NewVitessCluster(t, nil) oldDefaultReplicas := defaultReplicas @@ -256,7 +129,7 @@ func TestVtctldMigrateUnsharded(t *testing.T) { execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") waitForRowCountInTablet(t, targetPrimary, "product", "rating", 3) waitForRowCountInTablet(t, targetPrimary, "product", "review", 4) - vdiffSideBySide(t, ksWorkflow, "extcell1") + doVDiff(t, ksWorkflow, "extcell1") output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", "--target-keyspace", "product", "--workflow", "e1", "show") diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 7ef75390fbc..da8b9d1f96b 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -39,7 +39,7 @@ func TestMoveTablesBuffering(t *testing.T) { catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") - vdiff(t, targetKs, workflowName, "", false, true, nil) + vdiff(t, targetKs, workflowName, "", nil) waitForLowLag(t, "customer", workflowName) for i := 0; i < 10; i++ { tstWorkflowSwitchReadsAndWrites(t) diff --git a/go/test/endtoend/vreplication/multi_tenant_test.go b/go/test/endtoend/vreplication/multi_tenant_test.go index 6bceaeefc6e..6e73303be8a 100644 --- a/go/test/endtoend/vreplication/multi_tenant_test.go +++ b/go/test/endtoend/vreplication/multi_tenant_test.go @@ -229,7 +229,7 @@ func TestMultiTenantSimple(t *testing.T) { // Create again and run it to completion. createFunc() - vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) + vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) mt.SwitchReads() confirmOnlyReadsSwitched(t) @@ -389,7 +389,7 @@ func TestMultiTenantSharded(t *testing.T) { // Note: we cannot insert into the target keyspace since that is never routed to the source keyspace. lastIndex = insertRows(lastIndex, sourceKeyspace) waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, mt.workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) - vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) + vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) mt.SwitchReadsAndWrites() // Note: here we have already switched, and we can insert into the target keyspace, and it should get reverse // replicated to the source keyspace. The source keyspace is routed to the target keyspace at this point. 
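Throughout this patch, calls like the vdiff(...) lines in the hunks above drop the two boolean arguments that previously selected the legacy vtctlclient and/or vtctldclient code paths. The helper's definition sits outside this diff, so the following is only a sketch of the assumed post-change shape; expectedVDiff2Result and doVtctldclientVDiff are taken on assumption from the surrounding vdiff helpers:

	// Sketch, not part of this patch: with the legacy client gone, vdiff
	// reduces to a thin wrapper over the single vtctldclient implementation.
	func vdiff(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) {
		doVtctldclientVDiff(t, keyspace, workflow, cells, want)
	}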
@@ -586,7 +586,7 @@ func (mtm *multiTenantMigration) switchTraffic(tenantId int64) { mt := mtm.getActiveMoveTables(tenantId) ksWorkflow := fmt.Sprintf("%s.%s", mtm.targetKeyspace, mt.workflowName) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - vdiff(t, mt.targetKeyspace, mt.workflowName, defaultCellName, false, true, nil) + vdiff(t, mt.targetKeyspace, mt.workflowName, defaultCellName, nil) mtm.insertSomeData(t, tenantId, sourceKeyspaceName, numAdditionalRowsPerTenant) mt.SwitchReadsAndWrites() mtm.insertSomeData(t, tenantId, sourceKeyspaceName, numAdditionalRowsPerTenant) diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index eec304e0a4d..959a0169950 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -271,9 +271,7 @@ func (wf *workflow) complete() { // TestPartialMoveTablesWithSequences enhances TestPartialMoveTables by adding an unsharded keyspace which has a // sequence. This tests that the sequence is migrated correctly and that we can reverse traffic back to the source func TestPartialMoveTablesWithSequences(t *testing.T) { - origExtraVTGateArgs := extraVTGateArgs - extraVTGateArgs = append(extraVTGateArgs, []string{ "--enable-partial-keyspace-migration", "--schema_change_signal=false", @@ -356,8 +354,8 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { vtgateConn, closeConn = getVTGateConn() defer closeConn() - t.Run("Confirm routing rules", func(t *testing.T) { + t.Run("Confirm routing rules", func(t *testing.T) { // Global routing rules should be in place with everything going to the source keyspace (customer). 
confirmGlobalRoutingToSource(t) @@ -410,7 +408,6 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { defer vtgateConn.Close() t.Run("Validate shard and tablet type routing", func(t *testing.T) { - // No shard targeting _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) @@ -482,10 +479,10 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { insertCustomers(t) - output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "seqTgt.seq", "show") + output, err = tc.vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", wfSeq.toKeyspace, "show", "--workflow", wfSeq.name) require.NoError(t, err) - output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "seqSrc.seq_reverse", "show") + output, err = tc.vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", wfSeq.fromKeyspace, "show", "--workflow", fmt.Sprintf("%s_reverse", wfSeq.name)) require.NoError(t, err) wfSeq.complete() @@ -501,21 +498,19 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) - output, err := tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") - require.Error(t, err) - require.Contains(t, output, "no streams found") + output, err := tc.vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", reverseKs, "show", "--workflow", reverseWf) + require.NoError(t, err) + require.True(t, isEmptyWorkflowShowOutput(output)) - // Delete the original workflow - originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf) - _, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "delete") + // Be sure that we've deleted the original workflow. + _, _ = tc.vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "delete", "--workflow", wf) + output, err = tc.vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", wf) require.NoError(t, err) - output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "show") - require.Error(t, err) - require.Contains(t, output, "no streams found") + require.True(t, isEmptyWorkflowShowOutput(output)) } // Confirm that the global routing rules are now gone. 
- output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + output, err = tc.vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact") require.NoError(t, err) require.Equal(t, emptyGlobalRoutingRules, output) @@ -564,7 +559,7 @@ func insertCustomers(t *testing.T) { } func confirmGlobalRoutingToSource(t *testing.T) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact") require.NoError(t, err) result := gjson.Get(output, "rules") result.ForEach(func(attributeKey, attributeValue gjson.Result) bool { diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 7ae8f83416d..09e80b19bd4 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -127,7 +127,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) catchup(t, targetTab80Dash, workflowName, "MoveTables") - vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) + vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) mt.SwitchReadsAndWrites() time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.Complete() @@ -190,7 +190,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { } waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) catchup(t, targetTab80Dash, workflowName, "MoveTables") - vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) + vdiff(t, targetKeyspace, workflowName, defaultCellName, nil) vtgateConn, closeConn := getVTGateConn() defer closeConn() @@ -199,7 +199,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- confirmGlobalRoutingToSource := func() { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact") require.NoError(t, err) result := gjson.Get(output, "rules") result.ForEach(func(attributeKey, attributeValue gjson.Result) bool { @@ -307,9 +307,6 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") workflowExec := tstWorkflowExec - if flavor == workflowFlavorVtctl { - workflowExec = tstWorkflowExecVtctl - } // We cannot Complete a partial move tables at the moment because // it will find that all traffic has (obviously) not been switched. 
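The cancel/delete verifications in the next hunk (and in partial_movetables_seq_test.go above) assert emptiness with isEmptyWorkflowShowOutput, the helper added to helper_test.go earlier in this patch. A quick sketch of its behavior; note that the non-compact case must match the constant's exact whitespace:

	_ = isEmptyWorkflowShowOutput(`{}`)                              // true: compact show with no workflows
	_ = isEmptyWorkflowShowOutput("{\n    \"workflows\": []\n}")     // true, provided it matches the constant exactly
	_ = isEmptyWorkflowShowOutput(`{"workflows": [{"name": "wf"}]}`) // false: a workflow exists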
@@ -337,7 +334,7 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) {
 	waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String())
 
 	catchup(t, targetTabDash80, workflowName, "MoveTables")
-	vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil)
+	vdiff(t, targetKeyspace, workflowName, defaultCellName, nil)
 	mtDash80.SwitchReadsAndWrites()
 	time.Sleep(loadTestBufferingWindowDuration + 1*time.Second)
 
@@ -366,21 +363,19 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) {
 		err = workflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", opts)
 		require.NoError(t, err)
 
-		output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", "--shards", opts.shardSubset, fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show")
-		require.Error(t, err)
-		require.Contains(t, output, "no streams found")
-
-		// Delete the original workflow
-		originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf)
-		_, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", "--shards", opts.shardSubset, originalKsWf, "delete")
+		output, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", reverseKs, "show", "--workflow", reverseWf, "--shards", opts.shardSubset)
 		require.NoError(t, err)
-		output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", "--shards", opts.shardSubset, originalKsWf, "show")
-		require.Error(t, err)
-		require.Contains(t, output, "no streams found")
+		require.True(t, isEmptyWorkflowShowOutput(output))
+
+		// Be sure we've deleted the original workflow.
+		_, _ = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "delete", "--workflow", wf, "--shards", opts.shardSubset)
+		output, err = vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKs, "show", "--workflow", wf, "--shards", opts.shardSubset)
+		require.NoError(t, err, output)
+		require.True(t, isEmptyWorkflowShowOutput(output))
 	}
 
 	// Confirm that the global routing rules are now gone.
-	output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules")
+	output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules", "--compact")
 	require.NoError(t, err)
 	require.Equal(t, emptyGlobalRoutingRules, output)
 
@@ -390,7 +385,6 @@ func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) {
 
 // TestPartialMoveTablesBasic tests partial move tables by moving each
 // customer shard -- -80,80- -- one at a time to customer2.
-// We test with both the vtctlclient and vtctldclient flavors.
 func TestPartialMoveTablesBasic(t *testing.T) {
 	currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables
 	testPartialMoveTablesBasic(t, workflowFlavorVtctld)
diff --git a/go/test/endtoend/vreplication/reference_test.go b/go/test/endtoend/vreplication/reference_test.go
index 8ff77de8708..efef799878b 100644
--- a/go/test/endtoend/vreplication/reference_test.go
+++ b/go/test/endtoend/vreplication/reference_test.go
@@ -119,8 +119,8 @@ func TestReferenceTableMaterializationAndRouting(t *testing.T) {
 	require.NoError(t, err)
 	vtgateConn.Close()
 
-	materialize(t, materializeCatSpec, false)
-	materialize(t, materializeMfgSpec, false)
+	materialize(t, materializeCatSpec)
+	materialize(t, materializeMfgSpec)
 
 	tabDash80 := vc.getPrimaryTablet(t, sks, "-80")
 	tab80Dash := vc.getPrimaryTablet(t, sks, "80-")
diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
index 28ffc762ecd..a8f1996d0d9 100644
--- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
+++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go
@@ -89,7 +89,7 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro
 	confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "")
 	catchup(t, targetTab1, workflowName, "Reshard")
 	catchup(t, targetTab2, workflowName, "Reshard")
-	vdiffSideBySide(t, ksWorkflow, "")
+	doVDiff(t, ksWorkflow, "")
 	return nil
 }
 
@@ -104,7 +104,7 @@ func createMoveTablesWorkflow(t *testing.T, tables string) {
 	confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables)
 	catchup(t, targetTab1, workflowName, "MoveTables")
 	catchup(t, targetTab2, workflowName, "MoveTables")
-	vdiffSideBySide(t, ksWorkflow, "")
+	doVDiff(t, ksWorkflow, "")
 }
 
 func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error {
@@ -112,7 +112,6 @@
 }
 
 // tstWorkflowExec executes a MoveTables or Reshard workflow command using
-// vtctldclient. If you need to use the legacy vtctlclient, use
-// tstWorkflowExecVtctl instead.
+// vtctldclient.
 func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes,
 	sourceShards, targetShards string, options *workflowExecOptions) error {
 
@@ -181,74 +181,6 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables,
 	return nil
 }
 
-// tstWorkflowExecVtctl executes a MoveTables or Reshard workflow command using
-// vtctlclient. It should operate exactly the same way as tstWorkflowExec, but
-// using the legacy client.
-func tstWorkflowExecVtctl(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, - sourceShards, targetShards string, options *workflowExecOptions) error { - - var args []string - if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { - args = append(args, "MoveTables") - } else { - args = append(args, "Reshard") - } - - args = append(args, "--") - - if BypassLagCheck { - args = append(args, "--max_replication_lag_allowed=2542087h") - } - if options.atomicCopy { - args = append(args, "--atomic-copy") - } - switch action { - case workflowActionCreate: - if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { - args = append(args, "--source", sourceKs) - if tables != "" { - args = append(args, "--tables", tables) - } else { - args = append(args, "--all") - } - if sourceShards != "" { - args = append(args, "--source_shards", sourceShards) - } - } else { - args = append(args, "--source_shards", sourceShards, "--target_shards", targetShards) - } - // Test new experimental --defer-secondary-keys flag - switch currentWorkflowType { - case binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowType_Migrate, binlogdatapb.VReplicationWorkflowType_Reshard: - if !options.atomicCopy && options.deferSecondaryKeys { - args = append(args, "--defer-secondary-keys") - } - args = append(args, "--initialize-target-sequences") // Only used for MoveTables - } - case workflowActionMirrorTraffic: - args = append(args, "--percent", strconv.FormatFloat(float64(options.percent), byte('f'), -1, 32)) - default: - if options.shardSubset != "" { - args = append(args, "--shards", options.shardSubset) - } - } - if cells != "" { - args = append(args, "--cells", cells) - } - if tabletTypes != "" { - args = append(args, "--tablet_types", tabletTypes) - } - args = append(args, "--timeout", time.Minute.String()) - ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) - args = append(args, action, ksWorkflow) - output, err := vc.VtctlClient.ExecuteCommandWithOutput(args...) - lastOutput = output - if err != nil { - return fmt.Errorf("%s: %s", err, output) - } - return nil -} - func tstWorkflowSwitchReads(t *testing.T, tabletTypes, cells string) { if tabletTypes == "" { tabletTypes = "replica,rdonly" @@ -288,23 +220,15 @@ func tstWorkflowComplete(t *testing.T) error { } // testWorkflowUpdate is a very simple test of the workflow update -// vtctlclient/vtctldclient command. +// vtctldclient command. // It performs a non-behavior impacting update, setting tablet-types // to primary,replica,rdonly (the only applicable types in these tests). func testWorkflowUpdate(t *testing.T) { tabletTypes := "primary,replica,rdonly" - // Test vtctlclient first. - _, err := vc.VtctlClient.ExecuteCommandWithOutput("workflow", "--", "--tablet-types", tabletTypes, "noexist.noexist", "update") - require.Error(t, err, err) - resp, err := vc.VtctlClient.ExecuteCommandWithOutput("workflow", "--", "--tablet-types", tabletTypes, ksWorkflow, "update") - require.NoError(t, err) - require.NotEmpty(t, resp) - - // Test vtctldclient last. - _, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", tabletTypes) + _, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", tabletTypes) require.Error(t, err) // Change the tablet-types to rdonly. 
- resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") + resp, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") require.NoError(t, err, err) // Confirm that we changed the workflow. var ures vtctldatapb.WorkflowUpdateResponse @@ -559,10 +483,10 @@ func testReplicatingWithPKEnumCols(t *testing.T) { insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" execVtgateQuery(t, vtgateConn, sourceKs, deleteQuery) waitForNoWorkflowLag(t, vc, targetKs, workflowName) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") execVtgateQuery(t, vtgateConn, sourceKs, insertQuery) waitForNoWorkflowLag(t, vc, targetKs, workflowName) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") } func testReshardV2Workflow(t *testing.T) { @@ -697,9 +621,9 @@ func testMoveTablesV2Workflow(t *testing.T) { testRestOfWorkflow(t) // Create our primary intra-keyspace materialization. - materialize(t, materializeCustomerNameSpec, false) + materialize(t, materializeCustomerNameSpec) // Create a second one to confirm that multiple ones get migrated correctly. - materialize(t, materializeCustomerTypeSpec, false) + materialize(t, materializeCustomerTypeSpec) materializeShow() output, err = vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) @@ -986,7 +910,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) catchup(t, targetTab1, workflow, workflowType) catchup(t, targetTab2, workflow, workflowType) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") } allCellNames := getCellNames(cells) var switchReadsFollowedBySwitchWrites = func() { diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index be9ce67a626..704742d305b 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -14,7 +14,7 @@ import ( const GetCurrentTablesQuery = "show tables from _vt" func getSidecarDBTables(t *testing.T, tabletID string) (numTablets int, tables []string) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", "--json", tabletID, GetCurrentTablesQuery) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", "--json", tabletID, GetCurrentTablesQuery) require.NoError(t, err) result := gjson.Get(output, "rows") require.NotNil(t, result) @@ -118,7 +118,7 @@ func validateSidecarDBTables(t *testing.T, tabletID string, tables []string) { func modifySidecarDBSchema(t *testing.T, vc *VitessCluster, tabletID string, ddls []string) (numChanges int) { for _, ddl := range ddls { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", tabletID, ddl) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", tabletID, ddl) require.NoErrorf(t, err, output) } return len(ddls) diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 2c0a9a4f5a5..3faa9e76a78 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -87,14 +87,13 @@ func TestMoveTablesTZ(t *testing.T) { tables := "datze" - 
ksErrorWorkflow := fmt.Sprintf("%s.%s", targetKs, "tzerr") - output, err := vc.VtctlClient.ExecuteCommandWithOutput("MoveTables", "--", "--source", sourceKs, "--tables", - tables, "--source_time_zone", "US/Pacifik", "Create", ksErrorWorkflow) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", targetKs, "Create", + "--source-keyspace", sourceKs, "--tables", tables, "--source-time-zone", "US/Pacifik") require.Error(t, err, output) require.Contains(t, output, "time zone is invalid") - output, err = vc.VtctlClient.ExecuteCommandWithOutput("MoveTables", "--", "--source", sourceKs, "--tables", - tables, "--source_time_zone", "US/Pacific", "Create", ksWorkflow) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--workflow", workflow, "--target-keyspace", targetKs, "Create", + "--source-keyspace", sourceKs, "--tables", tables, "--source-time-zone", "US/Pacific") require.NoError(t, err, output) catchup(t, customerTab, workflow, "MoveTables") @@ -105,7 +104,7 @@ func TestMoveTablesTZ(t *testing.T) { _, err = vtgateConn.ExecuteFetch("insert into datze(id, dt2) values (12, '2022-04-01 5:06:07')", 1, false) // dst require.NoError(t, err) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") // update to test date conversions in replication (vplayer) mode (update statements) _, err = vtgateConn.ExecuteFetch("update datze set dt2 = '2022-04-01 5:06:07' where id = 11", 1, false) // dst @@ -113,7 +112,7 @@ func TestMoveTablesTZ(t *testing.T) { _, err = vtgateConn.ExecuteFetch("update datze set dt2 = '2022-01-01 10:20:30' where id = 12", 1, false) // standard time require.NoError(t, err) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") query := "select * from datze" qrSourceUSPacific, err := productTab.QueryTablet(query, sourceKs, true) @@ -175,7 +174,7 @@ func TestMoveTablesTZ(t *testing.T) { require.Equal(t, row.AsString("dt2", ""), qrTargetUSPacific.Named().Rows[i].AsString("dt2", "")) require.Equal(t, row.AsString("ts1", ""), qrTargetUSPacific.Named().Rows[i].AsString("ts1", "")) } - output, err = vc.VtctlClient.ExecuteCommandWithOutput("MoveTables", "--", "SwitchTraffic", ksWorkflow) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("MoveTables", "--target-keyspace", targetKs, "SwitchTraffic", "--workflow", workflow) require.NoError(t, err, output) qr, err := productTab.QueryTablet(sqlparser.BuildParsedQuery("select * from %s.vreplication where workflow='%s_reverse'", @@ -192,5 +191,5 @@ func TestMoveTablesTZ(t *testing.T) { // inserts to test date conversions in reverse replication execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") - vdiffSideBySide(t, ksReverseWorkflow, "") + doVDiff(t, ksReverseWorkflow, "") } diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 612ba00236b..6116e26eef5 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -210,17 +210,19 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, statsShard := arrTargetShards[0] statsTablet := vc.getPrimaryTablet(t, tc.targetKs, statsShard) var args []string - args = append(args, tc.typ, "--") - args = append(args, "--source", tc.sourceKs) - if tc.typ == "Reshard" { - args = append(args, "--source_shards", 
tc.sourceShards, "--target_shards", tc.targetShards) - } + args = append(args, tc.typ) + args = append(args, "--workflow", tc.workflow) + args = append(args, "--target-keyspace", tc.targetKs) allCellNames := getCellNames(nil) + args = append(args, "create") args = append(args, "--cells", allCellNames) - args = append(args, "--tables", tc.tables) - args = append(args, "Create") - args = append(args, ksWorkflow) - err := vc.VtctlClient.ExecuteCommand(args...) + if tc.typ == "Reshard" { + args = append(args, "--source-shards", tc.sourceShards, "--target-shards", tc.targetShards) + } else { + args = append(args, "--source-keyspace", tc.sourceKs) + args = append(args, "--tables", tc.tables) + } + err := vc.VtctldClient.ExecuteCommand(args...) require.NoError(t, err) waitForShardsToCatchup := func() { @@ -279,8 +281,8 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, waitForShardsToCatchup() tc.vdiffCount++ // We only did vtctldclient vdiff create } else { - vdiff(t, tc.targetKs, tc.workflow, allCellNames, true, true, nil) - tc.vdiffCount += 2 // We did vtctlclient AND vtctldclient vdiff create + vdiff(t, tc.targetKs, tc.workflow, allCellNames, nil) + tc.vdiffCount++ } checkVDiffCountStat(t, statsTablet, tc.vdiffCount) @@ -288,7 +290,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, // compared by vdiff per table at the controller level -- works as expected. vdrc, err := getDebugVar(t, statsTablet.Port, []string{"VDiffRowsCompared"}) require.NoError(t, err, "failed to get VDiffRowsCompared stat from %s-%d tablet: %v", statsTablet.Cell, statsTablet.TabletUID, err) - uuid, jsout := performVDiff2Action(t, false, ksWorkflow, allCellNames, "show", "last", false, "--verbose") + uuid, jsout := performVDiff2Action(t, ksWorkflow, allCellNames, "show", "last", false, "--verbose") expect := gjson.Get(jsout, fmt.Sprintf("Reports.customer.%s", statsShard)).Int() got := gjson.Get(vdrc, fmt.Sprintf("%s.%s.%s", tc.workflow, uuid, "customer")).Int() require.Equal(t, expect, got, "expected VDiffRowsCompared stat to be %d, but got %d", expect, got) @@ -307,12 +309,12 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, if tc.stop { testStop(t, ksWorkflow, allCellNames) - tc.vdiffCount++ // We did either vtctlclient OR vtctldclient vdiff create + tc.vdiffCount++ } if tc.testCLICreateWait { testCLICreateWait(t, ksWorkflow, allCellNames) - tc.vdiffCount++ // We did either vtctlclient OR vtctldclient vdiff create + tc.vdiffCount++ } if tc.testCLIErrors { testCLIErrors(t, ksWorkflow, allCellNames) @@ -330,14 +332,14 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, // Create another VDiff record to confirm it gets deleted when the workflow is completed. 
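Worth pausing on the arg-building rework above: vtctldclient expects the workflow and target-keyspace selectors before the subcommand, with the per-command flags (source keyspace and tables, or shard lists for Reshard) after "create". A minimal standalone sketch of that shape — helper name and sample values are illustrative, not part of this PR:

package main

import "fmt"

// createArgs mirrors the flag ordering the reworked testWorkflow uses:
// global selectors first, then the subcommand, then command-specific flags.
func createArgs(typ, workflow, targetKs, sourceKs, tables, sourceShards, targetShards, cells string) []string {
	args := []string{typ, "--workflow", workflow, "--target-keyspace", targetKs, "create", "--cells", cells}
	if typ == "Reshard" {
		args = append(args, "--source-shards", sourceShards, "--target-shards", targetShards)
	} else {
		args = append(args, "--source-keyspace", sourceKs, "--tables", tables)
	}
	return args
}

func main() {
	fmt.Println(createArgs("MoveTables", "wf1", "customer", "product", "customer", "", "", "zone1"))
}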
ts := time.Now() - uuid, _ = performVDiff2Action(t, false, ksWorkflow, allCellNames, "create", "", false) - waitForVDiff2ToComplete(t, false, ksWorkflow, allCellNames, uuid, ts) + uuid, _ = performVDiff2Action(t, ksWorkflow, allCellNames, "create", "", false) + waitForVDiff2ToComplete(t, ksWorkflow, allCellNames, uuid, ts) tc.vdiffCount++ checkVDiffCountStat(t, statsTablet, tc.vdiffCount) - err = vc.VtctlClient.ExecuteCommand(tc.typ, "--", "SwitchTraffic", ksWorkflow) + err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.targetKs, "SwitchTraffic") require.NoError(t, err) - err = vc.VtctlClient.ExecuteCommand(tc.typ, "--", "Complete", ksWorkflow) + err = vc.VtctldClient.ExecuteCommand(tc.typ, "--workflow", tc.workflow, "--target-keyspace", tc.targetKs, "Complete") require.NoError(t, err) // Confirm the VDiff data is deleted for the workflow. @@ -348,18 +350,18 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, func testCLIErrors(t *testing.T, ksWorkflow, cells string) { t.Run("Client error handling", func(t *testing.T) { - _, output := performVDiff2Action(t, false, ksWorkflow, cells, "badcmd", "", true) + _, output := performVDiff2Action(t, ksWorkflow, cells, "badcmd", "", true) require.Contains(t, output, "Usage:") - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "create", "invalid_uuid", true) + _, output = performVDiff2Action(t, ksWorkflow, cells, "create", "invalid_uuid", true) require.Contains(t, output, "invalid UUID provided") - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "resume", "invalid_uuid", true) + _, output = performVDiff2Action(t, ksWorkflow, cells, "resume", "invalid_uuid", true) require.Contains(t, output, "invalid UUID provided") - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "delete", "invalid_uuid", true) + _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", "invalid_uuid", true) require.Contains(t, output, "invalid argument provided") - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "invalid_uuid", true) + _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "invalid_uuid", true) require.Contains(t, output, "invalid argument provided") - uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "create", uuid, true) + uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) + _, output = performVDiff2Action(t, ksWorkflow, cells, "create", uuid, true) require.Contains(t, output, "already exists") }) } @@ -432,7 +434,7 @@ func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell // Delete this vdiff as we used --auto-start=false and thus it never starts and // does not provide the normally expected show --verbose --format=json output. 
- _, output := performVDiff2Action(t, false, fmt.Sprintf("%s.%s", targetKs, workflowName), "", "delete", vduuid.String(), false) + _, output := performVDiff2Action(t, fmt.Sprintf("%s.%s", targetKs, workflowName), "", "delete", vduuid.String(), false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) }) } @@ -450,35 +452,35 @@ func testDelete(t *testing.T, ksWorkflow, cells string) { } return int64(len(seen)) } - _, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) + _, output := performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) initialVDiffCount := uuidCount(gjson.Get(output, "#.UUID").Array()) for ; initialVDiffCount < 3; initialVDiffCount++ { - _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false) + _, _ = performVDiff2Action(t, ksWorkflow, cells, "create", "", false) } // Now let's confirm that we have at least 3 unique VDiffs. - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) + _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) require.GreaterOrEqual(t, uuidCount(gjson.Get(output, "#.UUID").Array()), int64(3)) // And that our initial count is what we expect. require.Equal(t, initialVDiffCount, uuidCount(gjson.Get(output, "#.UUID").Array())) // Test show last with verbose too as a side effect. - uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false, "--verbose") + uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false, "--verbose") // The TableSummary is only present with --verbose. require.Contains(t, output, `"TableSummary":`) // Now let's delete one of the VDiffs. - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "delete", uuid, false) + _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", uuid, false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) // And confirm that our unique VDiff count has only decreased by one. - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) + _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) require.Equal(t, initialVDiffCount-1, uuidCount(gjson.Get(output, "#.UUID").Array())) // Now let's delete all of them. - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "delete", "all", false) + _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", "all", false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) // And finally confirm that we have no more VDiffs. - _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) + _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) require.Equal(t, int64(0), gjson.Get(output, "#").Int()) }) } @@ -502,7 +504,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // Confirm the last VDiff is in the expected completed state. - uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) + uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) jsonOutput := getVDiffInfo(output) require.Equal(t, "completed", jsonOutput.State) // Save the number of rows compared in previous runs. 
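The testDelete helper above leans on gjson path queries to count distinct VDiff UUIDs in the "show all" JSON. A self-contained sketch of that dedup step, with made-up sample JSON:

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

// uniqueUUIDs mirrors the uuidCount closure in testDelete: collect every
// UUID field from the array output and count the distinct values.
func uniqueUUIDs(showAllJSON string) int64 {
	seen := map[string]struct{}{}
	for _, u := range gjson.Get(showAllJSON, "#.UUID").Array() {
		seen[u.String()] = struct{}{}
	}
	return int64(len(seen))
}

func main() {
	out := `[{"UUID":"a"},{"UUID":"b"},{"UUID":"a"}]`
	fmt.Println(uniqueUUIDs(out)) // 2
}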
@@ -518,8 +520,8 @@ func testResume(t *testing.T, tc *testCase, cells string) { // confirm that the VDiff was resumed, able to complete, and we compared the // expected number of rows in total (original run and resume) - _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "resume", uuid, false) - info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) + _, _ = performVDiff2Action(t, ksWorkflow, cells, "resume", uuid, false) + info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, ogTime) require.NotNil(t, info) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) @@ -529,10 +531,10 @@ func testResume(t *testing.T, tc *testCase, cells string) { func testStop(t *testing.T, ksWorkflow, cells string) { t.Run("Stop", func(t *testing.T) { // Create a new VDiff and immediately stop it. - uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false) - _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "stop", uuid, false) + uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "create", "", false) + _, _ = performVDiff2Action(t, ksWorkflow, cells, "stop", uuid, false) // Confirm the VDiff is in the expected state. - _, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", uuid, false) + _, output := performVDiff2Action(t, ksWorkflow, cells, "show", uuid, false) jsonOutput := getVDiffInfo(output) // It may have been able to complete before we could stop it (there's virtually no data // to diff). There's no way to avoid this potential race so don't consider that a failure. @@ -549,7 +551,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // Confirm the last VDiff is in the expected completed state. - uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) + uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) jsonOutput := getVDiffInfo(output) require.Equal(t, "completed", jsonOutput.State) // Save the number of rows compared in the first run. @@ -576,7 +578,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // Confirm that the VDiff was retried, able to complete, and we compared the expected // number of rows in total (original run and retry). - info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) + info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, ogTime) require.NotNil(t, info) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) @@ -584,10 +586,10 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { } func testCLICreateWait(t *testing.T, ksWorkflow string, cells string) { - t.Run("vtctl create and wait", func(t *testing.T) { + t.Run("vtctldclient create and wait", func(t *testing.T) { chCompleted := make(chan bool) go func() { - _, output := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, "--wait", "--wait-update-interval=1s") + _, output := performVDiff2Action(t, ksWorkflow, cells, "create", "", false, "--wait", "--wait-update-interval=1s") completed := false // We don't try to parse the JSON output as it may contain a series of outputs // that together do not form a valid JSON document. 
We can change this in the diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index fd223d78082..fcc112b670b 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -45,55 +45,18 @@ var ( runVDiffsSideBySide = true ) -func vdiff(t *testing.T, keyspace, workflow, cells string, vtctlclient, vtctldclient bool, wantV2Result *expectedVDiff2Result) { - if vtctlclient { - doVtctlclientVDiff(t, keyspace, workflow, cells, wantV2Result) - } - if vtctldclient { - doVtctldclientVDiff(t, keyspace, workflow, cells, wantV2Result) - } +func vdiff(t *testing.T, keyspace, workflow, cells string, wantV2Result *expectedVDiff2Result) { + doVtctldclientVDiff(t, keyspace, workflow, cells, wantV2Result) } -// vdiffSideBySide will run the VDiff command using both vtctlclient -// and vtctldclient. -func vdiffSideBySide(t *testing.T, ksWorkflow, cells string) { +func doVDiff(t *testing.T, ksWorkflow, cells string) { arr := strings.Split(ksWorkflow, ".") keyspace := arr[0] workflowName := arr[1] - if !runVDiffsSideBySide { - doVtctlclientVDiff(t, keyspace, workflowName, cells, nil) - return - } - vdiff(t, keyspace, workflowName, cells, true, true, nil) -} - -func doVtctlclientVDiff(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) { - ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) - t.Run(fmt.Sprintf("vtctlclient vdiff %s", ksWorkflow), func(t *testing.T) { - // update-table-stats is needed in order to test progress reports. - uuid, _ := performVDiff2Action(t, true, ksWorkflow, cells, "create", "", false, "--auto-retry", - "--update-table-stats", fmt.Sprintf("--filtered_replication_wait_time=%v", vdiffTimeout/2)) - info := waitForVDiff2ToComplete(t, true, ksWorkflow, cells, uuid, time.Time{}) - require.NotNil(t, info) - require.Equal(t, workflow, info.Workflow) - require.Equal(t, keyspace, info.Keyspace) - if want != nil { - require.Equal(t, want.state, info.State) - require.Equal(t, strings.Join(want.shards, ","), info.Shards) - require.Equal(t, want.hasMismatch, info.HasMismatch) - } else { - require.Equal(t, "completed", info.State, "vdiff results: %+v", info) - require.False(t, info.HasMismatch, "vdiff results: %+v", info) - require.NotZero(t, info.RowsCompared) - } - if strings.Contains(t.Name(), "AcrossDBVersions") { - log.Errorf("VDiff resume cannot be guaranteed between major MySQL versions due to implied collation differences, skipping resume test...") - return - } - }) + doVtctldclientVDiff(t, keyspace, workflowName, cells, nil) } -func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cells, uuid string, completedAtMin time.Time) *vdiffInfo { +func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, completedAtMin time.Time) *vdiffInfo { var info *vdiffInfo var jsonStr string first := true @@ -102,7 +65,7 @@ func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cell go func() { for { time.Sleep(vdiffStatusCheckInterval) - _, jsonStr = performVDiff2Action(t, useVtctlclient, ksWorkflow, cells, "show", uuid, false) + _, jsonStr = performVDiff2Action(t, ksWorkflow, cells, "show", uuid, false) info = getVDiffInfo(jsonStr) require.NotNil(t, info) if info.State == "completed" { @@ -169,8 +132,8 @@ func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *e if len(extraFlags) > 0 { flags = append(flags, extraFlags...) 
} - uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, flags...) - info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, time.Time{}) + uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "create", "", false, flags...) + info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, time.Time{}) require.NotNil(t, info) require.Equal(t, workflow, info.Workflow) require.Equal(t, keyspace, info.Keyspace) @@ -191,56 +154,34 @@ func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *e }) } -func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, action, actionArg string, expectError bool, extraFlags ...string) (uuid string, output string) { +func performVDiff2Action(t *testing.T, ksWorkflow, cells, action, actionArg string, expectError bool, extraFlags ...string) (uuid string, output string) { var err error targetKeyspace, workflowName, ok := strings.Cut(ksWorkflow, ".") require.True(t, ok, "invalid keyspace.workflow value: %s", ksWorkflow) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) - if useVtctlclient { + args := []string{"VDiff", "--target-keyspace", targetKeyspace, "--workflow", workflowName, "--format=json", action} + if strings.ToLower(action) == string(vdiff2.CreateAction) { // This will always result in us using a PRIMARY tablet, which is all // we start in many e2e tests, but it avoids the tablet picker logic // where when you ONLY specify the PRIMARY type it then picks the // shard's primary and ignores any cell settings. - args := []string{"VDiff", "--", "--tablet_types=in_order:primary,replica", "--source_cell=" + cells, "--format=json"} - if len(extraFlags) > 0 { - args = append(args, extraFlags...) - } - args = append(args, ksWorkflow, action, actionArg) - output, err = execVDiffWithRetry(t, expectError, false, args) - log.Infof("vdiff output: %+v (err: %+v)", output, err) - if !expectError { - require.Nil(t, err) - uuid = gjson.Get(output, "UUID").String() - if action != "delete" && !(action == "show" && actionArg == "all") { // A UUID is not required - require.NoError(t, err) - require.NotEmpty(t, uuid) - } - } - } else { - args := []string{"VDiff", "--target-keyspace", targetKeyspace, "--workflow", workflowName, "--format=json", action} - if strings.ToLower(action) == string(vdiff2.CreateAction) { - // This will always result in us using a PRIMARY tablet, which is all - // we start in many e2e tests, but it avoids the tablet picker logic - // where when you ONLY specify the PRIMARY type it then picks the - // shard's primary and ignores any cell settings. - args = append(args, "--tablet-types=primary,replica", "--tablet-types-in-preference-order", "--source-cells="+cells) - } - if len(extraFlags) > 0 { - args = append(args, extraFlags...) - } - if actionArg != "" { - args = append(args, actionArg) - } + args = append(args, "--tablet-types=primary,replica", "--tablet-types-in-preference-order", "--source-cells="+cells) + } + if len(extraFlags) > 0 { + args = append(args, extraFlags...) 
+ } + if actionArg != "" { + args = append(args, actionArg) + } - output, err = execVDiffWithRetry(t, expectError, true, args) - log.Infof("vdiff output: %+v (err: %+v)", output, err) - if !expectError { - require.NoError(t, err) - ouuid := gjson.Get(output, "UUID").String() - if action == "create" || (action == "show" && actionArg != "all") { // A UUID is returned - require.NotEmpty(t, ouuid) - uuid = ouuid - } + output, err = execVDiffWithRetry(t, expectError, args) + log.Infof("vdiff output: %+v (err: %+v)", output, err) + if !expectError { + require.NoError(t, err) + ouuid := gjson.Get(output, "UUID").String() + if action == "create" || (action == "show" && actionArg != "all") { // A UUID is returned + require.NotEmpty(t, ouuid) + uuid = ouuid } } @@ -264,7 +205,7 @@ type vdiffResult struct { } // execVDiffWithRetry will ignore transient errors that can occur during workflow state changes. -func execVDiffWithRetry(t *testing.T, expectError bool, useVtctldClient bool, args []string) (string, error) { +func execVDiffWithRetry(t *testing.T, expectError bool, args []string) (string, error) { ctx, cancel := context.WithTimeout(context.Background(), vdiffRetryTimeout) defer cancel() vdiffResultCh := make(chan vdiffResult) @@ -282,11 +223,7 @@ func execVDiffWithRetry(t *testing.T, expectError bool, useVtctldClient bool, ar time.Sleep(vdiffRetryInterval) } retry = false - if useVtctldClient { - output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) - } else { - output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) - } + output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) if err != nil { if expectError { result := vdiffResult{output: output, err: err} diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index a4c25941801..d668701100e 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -102,7 +102,7 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { doVdiff := func(workflowName, table string) { defer wg.Done() - vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + vdiff(t, targetKeyspace, workflowName, cellName, nil) } go doVdiff("wf1", "customer") go doVdiff("wf2", "customer2") diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 955afde2f18..72888604c2e 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -286,7 +286,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) // Wait for the copy phase to start waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) - // The initial copy phase should be blocking on the history list + // The initial copy phase should be blocking on the history list. 
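With the vtctlclient branch gone, execVDiffWithRetry reduces to a single retry loop around the vtctldclient call. A condensed, standalone sketch of that run-retry-timeout shape (the helper name and intervals here are illustrative; the real helper runs the command in a goroutine and reports over a channel):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// withRetry re-runs the command on an interval until it succeeds or the
// surrounding context times out, which is the shape execVDiffWithRetry keeps.
func withRetry(ctx context.Context, interval time.Duration, run func() (string, error)) (string, error) {
	for {
		out, err := run()
		if err == nil {
			return out, nil
		}
		select {
		case <-ctx.Done():
			return out, errors.Join(err, ctx.Err())
		case <-time.After(interval):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	out, err := withRetry(ctx, 100*time.Millisecond, func() (string, error) { return "ok", nil })
	fmt.Println(out, err)
}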
confirmWorkflowHasCopiedNoData(t, targetKs, workflow) releaseInnoDBRowHistory(t, trxConn) trxConn.Close() @@ -336,7 +336,7 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) - materializeRollup(t, true) + materializeRollup(t) shardCustomer(t, true, []*Cell{defaultCell}, defaultCellName, false) @@ -351,11 +351,11 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string return } - materializeProduct(t, true) + materializeProduct(t) - materializeMerchantOrders(t, true) - materializeSales(t, true) - materializeMerchantSales(t, true) + materializeMerchantOrders(t) + materializeSales(t) + materializeMerchantSales(t) reshardMerchant2to3SplitMerge(t) reshardMerchant3to1Merge(t) @@ -499,13 +499,13 @@ func TestVStreamFlushBinlog(t *testing.T) { // Now we should rotate the binary logs ONE time on the source, even // though we're opening up multiple result streams (1 per table). runVDiffsSideBySide = false - vdiff(t, targetKs, workflow, defaultCellName, true, false, nil) + vdiff(t, targetKs, workflow, defaultCellName, nil) flushCount = int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) require.Equal(t, flushCount, int64(1), "VStreamerFlushedBinlogs should now be 1") // Now if we do another vdiff, we should NOT rotate the binlogs again // as we haven't been generating a lot of new binlog events. - vdiff(t, targetKs, workflow, defaultCellName, true, false, nil) + vdiff(t, targetKs, workflow, defaultCellName, nil) flushCount = int64(sourceTab.GetVars()["VStreamerFlushedBinlogs"].(float64)) require.Equal(t, flushCount, int64(1), "VStreamerFlushedBinlogs should still be 1") } @@ -623,7 +623,7 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { testVStreamFrom(t, vtgate, keyspace, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) - isTableInDenyList(t, vc, "product:0", "customer") + isTableInDenyList(t, vc, "product/0", "customer") // we tag along this test so as not to create the overhead of creating another cluster testVStreamCellFlag(t) } @@ -802,7 +802,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) } } - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") cellNames := getCellNames(cells) switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, workflowType, cellNames, ksWorkflow, false) @@ -834,7 +834,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl catchup(t, productTab, workflow, "MoveTables") - vdiffSideBySide(t, "product.p2c_reverse", "") + doVDiff(t, "product.p2c_reverse", "") if withOpenTx { execVtgateQuery(t, vtgateConn, "", deleteOpenTxQuery) } @@ -878,13 +878,13 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl switchWrites(t, workflowType, ksWorkflow, false) var exists bool - exists, err = isTableInDenyList(t, vc, "product:0", "customer") + exists, err = isTableInDenyList(t, vc, "product/0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables) - exists, err = isTableInDenyList(t, vc, "product:0", "customer") + exists, err = isTableInDenyList(t, vc, "product/0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") 
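One subtlety in TestVStreamFlushBinlog above: tablet vars come back from the JSON debug-vars endpoint as map[string]any, so counters arrive as float64 and need the two-step conversion the test performs. A tiny standalone illustration of that decode-then-cast step:

package main

import "fmt"

func main() {
	// JSON numbers decode into float64 under map[string]any, so the cast
	// below mirrors the assertion pattern used in TestVStreamFlushBinlog.
	vars := map[string]any{"VStreamerFlushedBinlogs": float64(1)}
	flushCount := int64(vars["VStreamerFlushedBinlogs"].(float64))
	fmt.Println(flushCount)
}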
require.False(t, exists) @@ -1069,7 +1069,7 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou } } restartWorkflow(t, ksWorkflow) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") if dryRunResultSwitchReads != nil { reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "rdonly,replica", "--dry-run") } @@ -1108,7 +1108,7 @@ func shardOrders(t *testing.T) { workflowType := "MoveTables" catchup(t, customerTab1, workflow, workflowType) catchup(t, customerTab2, workflow, workflowType) - vdiffSideBySide(t, ksWorkflow, "") + doVDiff(t, ksWorkflow, "") switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) @@ -1119,18 +1119,11 @@ func shardOrders(t *testing.T) { } func checkThatVDiffFails(t *testing.T, keyspace, workflow string) { - ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) - t.Run("check that vdiffSideBySide won't run", func(t2 *testing.T) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v1", ksWorkflow) + t.Run("check that vdiff won't run", func(t2 *testing.T) { + output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--workflow", workflow, "--target-keyspace", keyspace, "create") require.Error(t, err) require.Contains(t, output, "invalid VDiff run") }) - t.Run("check that vdiff2 won't run", func(t2 *testing.T) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", ksWorkflow) - require.Error(t, err) - require.Contains(t, output, "invalid VDiff run") - - }) } func shardMerchant(t *testing.T) { @@ -1155,7 +1148,7 @@ func shardMerchant(t *testing.T) { catchup(t, merchantTab1, workflow, workflowType) catchup(t, merchantTab2, workflow, workflowType) - vdiffSideBySide(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "") + doVDiff(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "") switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) printRoutingRules(t, vc, "After merchant movetables") @@ -1174,34 +1167,25 @@ func shardMerchant(t *testing.T) { }) } -func materialize(t *testing.T, spec string, useVtctldClient bool) { - if useVtctldClient { - t.Run("vtctldclient materialize", func(t *testing.T) { - // Split out the parameters from the JSON spec for - // use in the vtctldclient command flags. - // This allows us to test both clients with the same - // input. 
- sj := gjson.Parse(spec) - workflow := sj.Get("workflow").String() - require.NotEmpty(t, workflow, "workflow not found in spec: %s", spec) - sourceKeyspace := sj.Get("source_keyspace").String() - require.NotEmpty(t, sourceKeyspace, "source_keyspace not found in spec: %s", spec) - targetKeyspace := sj.Get("target_keyspace").String() - require.NotEmpty(t, targetKeyspace, "target_keyspace not found in spec: %s", spec) - tableSettings := sj.Get("table_settings").String() - require.NotEmpty(t, tableSettings, "table_settings not found in spec: %s", spec) - stopAfterCopy := sj.Get("stop-after-copy").Bool() // Optional - err := vc.VtctldClient.ExecuteCommand("materialize", "--workflow", workflow, "--target-keyspace", targetKeyspace, - "create", "--source-keyspace", sourceKeyspace, "--table-settings", tableSettings, - fmt.Sprintf("--stop-after-copy=%t", stopAfterCopy)) - require.NoError(t, err, "Materialize") - }) - } else { - t.Run("materialize", func(t *testing.T) { - err := vc.VtctlClient.ExecuteCommand("Materialize", spec) - require.NoError(t, err, "Materialize") - }) - } +func materialize(t *testing.T, spec string) { + t.Run("materialize", func(t *testing.T) { + // Split out the parameters from the JSON spec for + // use in the vtctldclient command flags. + sj := gjson.Parse(spec) + workflow := sj.Get("workflow").String() + require.NotEmpty(t, workflow, "workflow not found in spec: %s", spec) + sourceKeyspace := sj.Get("source_keyspace").String() + require.NotEmpty(t, sourceKeyspace, "source_keyspace not found in spec: %s", spec) + targetKeyspace := sj.Get("target_keyspace").String() + require.NotEmpty(t, targetKeyspace, "target_keyspace not found in spec: %s", spec) + tableSettings := sj.Get("table_settings").String() + require.NotEmpty(t, tableSettings, "table_settings not found in spec: %s", spec) + stopAfterCopy := sj.Get("stop-after-copy").Bool() // Optional + err := vc.VtctldClient.ExecuteCommand("materialize", "--workflow", workflow, "--target-keyspace", targetKeyspace, + "create", "--source-keyspace", sourceKeyspace, "--table-settings", tableSettings, + fmt.Sprintf("--stop-after-copy=%t", stopAfterCopy)) + require.NoError(t, err, "Materialize") + }) } func testMaterializeWithNonExistentTable(t *testing.T) { @@ -1216,14 +1200,14 @@ func testMaterializeWithNonExistentTable(t *testing.T) { }) } -func materializeProduct(t *testing.T, useVtctldClient bool) { +func materializeProduct(t *testing.T) { t.Run("materializeProduct", func(t *testing.T) { // Materializing from "product" keyspace to "customer" keyspace. 
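The consolidated materialize() helper above keeps the old JSON specs as the single source of truth and translates them into vtctldclient flags with gjson. A runnable sketch of that translation, using an abbreviated spec (values are illustrative):

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	spec := `{"workflow":"sales","source_keyspace":"product","target_keyspace":"product","table_settings":"[{\"target_table\":\"sales\"}]"}`
	sj := gjson.Parse(spec)
	// Pull each field out of the spec and map it to the equivalent
	// vtctldclient flag, as the reworked materialize() helper does.
	args := []string{
		"materialize",
		"--workflow", sj.Get("workflow").String(),
		"--target-keyspace", sj.Get("target_keyspace").String(),
		"create",
		"--source-keyspace", sj.Get("source_keyspace").String(),
		"--table-settings", sj.Get("table_settings").String(),
		fmt.Sprintf("--stop-after-copy=%t", sj.Get("stop-after-copy").Bool()),
	}
	fmt.Println(args)
}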
workflow := "cproduct" keyspace := "customer" defaultCell := vc.Cells[vc.CellNames[0]] applyVSchema(t, materializeProductVSchema, keyspace) - materialize(t, materializeProductSpec, useVtctldClient) + materialize(t, materializeProductSpec) customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "primary") for _, tab := range customerTablets { catchup(t, tab, workflow, "Materialize") @@ -1303,7 +1287,7 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { }) } -func materializeRollup(t *testing.T, useVtctldClient bool) { +func materializeRollup(t *testing.T) { t.Run("materializeRollup", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -1312,7 +1296,7 @@ func materializeRollup(t *testing.T, useVtctldClient bool) { applyVSchema(t, materializeSalesVSchema, keyspace) defaultCell := vc.Cells[vc.CellNames[0]] productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet - materialize(t, materializeRollupSpec, useVtctldClient) + materialize(t, materializeRollupSpec) catchup(t, productTab, workflow, "Materialize") waitForRowCount(t, vtgateConn, "product", "rollup", 1) waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", @@ -1320,13 +1304,13 @@ func materializeRollup(t *testing.T, useVtctldClient bool) { }) } -func materializeSales(t *testing.T, useVtctldClient bool) { +func materializeSales(t *testing.T) { t.Run("materializeSales", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() keyspace := "product" applyVSchema(t, materializeSalesVSchema, keyspace) - materialize(t, materializeSalesSpec, useVtctldClient) + materialize(t, materializeSalesSpec) defaultCell := vc.Cells[vc.CellNames[0]] productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, productTab, "sales", "Materialize") @@ -1336,12 +1320,12 @@ func materializeSales(t *testing.T, useVtctldClient bool) { }) } -func materializeMerchantSales(t *testing.T, useVtctldClient bool) { +func materializeMerchantSales(t *testing.T) { t.Run("materializeMerchantSales", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() workflow := "msales" - materialize(t, materializeMerchantSalesSpec, useVtctldClient) + materialize(t, materializeMerchantSalesSpec) defaultCell := vc.Cells[vc.CellNames[0]] merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { @@ -1353,14 +1337,14 @@ func materializeMerchantSales(t *testing.T, useVtctldClient bool) { }) } -func materializeMerchantOrders(t *testing.T, useVtctldClient bool) { +func materializeMerchantOrders(t *testing.T) { t.Run("materializeMerchantOrders", func(t *testing.T) { vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() workflow := "morders" keyspace := merchantKeyspace applyVSchema(t, merchantOrdersVSchema, keyspace) - materialize(t, materializeMerchantOrdersSpec, useVtctldClient) + materialize(t, materializeMerchantOrdersSpec) defaultCell := vc.Cells[vc.CellNames[0]] merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { @@ -1753,12 +1737,12 @@ func waitForInnoDBHistoryLength(t *testing.T, 
tablet *cluster.VttabletProcess, e require.Equal(t, 1, len(res.Rows)) historyLen, err = res.Rows[0][0].ToInt64() require.NoError(t, err) - if historyLen > expectedLength { + if historyLen >= expectedLength { return } select { case <-timer.C: - t.Fatalf("Did not reach the expected InnoDB history length of %d before the timeout of %s; last seen value: %d", expectedLength, defaultTimeout, historyLen) + require.FailNow(t, "Did not reach the minimum expected InnoDB history length of %d before the timeout of %s; last seen value: %d", expectedLength, defaultTimeout, historyLen) default: time.Sleep(defaultTick) } diff --git a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go index 4ee977c4d74..7a7247b39bf 100644 --- a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -180,7 +180,7 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = 'wf1'" - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", alias, query) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } confirmNoRoutingRules(t) @@ -263,7 +263,7 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. 
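The waitForInnoDBHistoryLength fix above changes the success condition from > to >=, so exactly reaching the requested floor now counts as success. The loop shape, reduced to a standalone sketch (names and intervals are illustrative):

package main

import (
	"fmt"
	"time"
)

// waitAtLeast polls until the observed value reaches the floor (>=) or the
// deadline passes — the post-fix semantics of waitForInnoDBHistoryLength.
func waitAtLeast(read func() int64, floor int64, timeout, tick time.Duration) (int64, bool) {
	deadline := time.Now().Add(timeout)
	for {
		if v := read(); v >= floor {
			return v, true
		}
		if time.Now().After(deadline) {
			return read(), false
		}
		time.Sleep(tick)
	}
}

func main() {
	v := int64(0)
	got, ok := waitAtLeast(func() int64 { v++; return v }, 3, time.Second, 10*time.Millisecond)
	fmt.Println(got, ok)
}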
- vdiff(t, targetKeyspace, workflowName, "zone1", false, true, nil) + vdiff(t, targetKeyspace, workflowName, "zone1", nil) (*mt).SwitchReadsAndWrites() validateReadsRouteToTarget(t, "replica") @@ -282,7 +282,7 @@ func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetK func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, targetTabs map[string]*cluster.VttabletProcess) { for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", alias, "drop table customer") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, "drop table customer") require.NoError(t, err, output) } createFlags := []string{} @@ -492,7 +492,7 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards for _, tab := range targetTabs { alias := fmt.Sprintf("zone1-%d", tab.TabletUID) query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = '" + workflowName + "'" - output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", alias, query) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ExecuteFetchAsDBA", alias, query) require.NoError(t, err, output) } rs.Start() @@ -504,7 +504,7 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards for _, targetTab := range targetTabs { catchup(t, targetTab, workflowName, "Reshard") } - vdiff(t, keyspace, workflowName, "zone1", false, true, nil) + vdiff(t, keyspace, workflowName, "zone1", nil) shardReadsRouteToSource := func() { require.True(t, getShardRoute(t, keyspace, "-80", "replica")) @@ -524,14 +524,14 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards rs.SwitchReadsAndWrites() waitForLowLag(t, keyspace, workflowName+"_reverse") - vdiff(t, keyspace, workflowName+"_reverse", "zone1", true, false, nil) + vdiff(t, keyspace, workflowName+"_reverse", "zone1", nil) shardReadsRouteToTarget() shardWritesRouteToTarget() confirmStates(t, &wf, wrangler.WorkflowStateNotSwitched, wrangler.WorkflowStateAllSwitched) rs.ReverseReadsAndWrites() waitForLowLag(t, keyspace, workflowName) - vdiff(t, keyspace, workflowName, "zone1", false, true, nil) + vdiff(t, keyspace, workflowName, "zone1", nil) shardReadsRouteToSource() shardWritesRouteToSource() confirmStates(t, &wf, wrangler.WorkflowStateAllSwitched, wrangler.WorkflowStateNotSwitched) @@ -587,7 +587,7 @@ func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards confirmStates(t, &wf, wrangler.WorkflowStateReadsSwitched, wrangler.WorkflowStateNotSwitched) // Confirm that everything is still in sync after our switch fest. 
- vdiff(t, keyspace, workflowName, "zone1", false, true, nil) + vdiff(t, keyspace, workflowName, "zone1", nil) rs.SwitchReadsAndWrites() shardReadsRouteToTarget() diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index 6ca8dcfe472..e14d3be8720 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -120,7 +120,7 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { defer timer.Stop() log.Infof("Started ApplyVSchema") for { - if err := vc.VtctlClient.ExecuteCommand("ApplyVSchema", "--", "--vschema={}", "product"); err != nil { + if err := vc.VtctldClient.ExecuteCommand("ApplyVSchema", "--vschema={}", "product"); err != nil { log.Errorf("ApplyVSchema command failed with %+v\n", err) return } @@ -140,8 +140,8 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { }() <-ch // wait for enough ApplyVSchema calls before doing a PRS - if err := vc.VtctlClient.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", "product/0", - "--new_primary", "zone1-101", "--wait_replicas_timeout", defaultTimeout.String()); err != nil { + if err := vc.VtctldClient.ExecuteCommand("PlannedReparentShard", "product/0", + "--new-primary", "zone1-101", "--wait-replicas-timeout", defaultTimeout.String()); err != nil { require.NoError(t, err, "PlannedReparentShard command failed") } } diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 3f79a35b569..8079c968ebb 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -28,7 +28,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/log" - _ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient" _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" "vitess.io/vitess/go/vt/vtgate/vtgateconn" @@ -140,7 +139,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 1: if failover { insertMu.Lock() - output, err := vc.VtctlClient.ExecuteCommandWithOutput("PlannedReparentShard", "--", "--keyspace_shard=product/0", "--new_primary=zone1-101") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "product/0", "--new-primary=zone1-101") insertMu.Unlock() log.Infof("output of first PRS is %s", output) require.NoError(t, err) @@ -148,7 +147,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { case 2: if failover { insertMu.Lock() - output, err := vc.VtctlClient.ExecuteCommandWithOutput("PlannedReparentShard", "--", "--keyspace_shard=product/0", "--new_primary=zone1-100") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("PlannedReparentShard", "product/0", "--new-primary=zone1-100") insertMu.Unlock() log.Infof("output of second PRS is %s", output) require.NoError(t, err) diff --git a/go/test/endtoend/vreplication/wrappers_test.go b/go/test/endtoend/vreplication/wrappers_test.go index d1fff1af1c6..2ca1b3bb724 100644 --- a/go/test/endtoend/vreplication/wrappers_test.go +++ b/go/test/endtoend/vreplication/wrappers_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) type iWorkflow interface { @@ -50,17 +49,14 @@ type workflowFlavor int const ( workflowFlavorRandom workflowFlavor = iota - workflowFlavorVtctl workflowFlavorVtctld ) var workflowFlavors = []workflowFlavor{ - workflowFlavorVtctl, workflowFlavorVtctld, } var workflowFlavorNames = 
map[workflowFlavor]string{ - workflowFlavorVtctl: "vtctl", workflowFlavorVtctld: "vtctld", } @@ -100,8 +96,6 @@ func newMoveTables(vc *VitessCluster, mt *moveTablesWorkflow, flavor workflowFla flavor = workflowFlavors[rand.IntN(len(workflowFlavors))] } switch flavor { - case workflowFlavorVtctl: - mt2 = newVtctlMoveTables(mt) case workflowFlavorVtctld: mt2 = newVtctldMoveTables(mt) default: @@ -111,102 +105,6 @@ func newMoveTables(vc *VitessCluster, mt *moveTablesWorkflow, flavor workflowFla return mt2 } -type VtctlMoveTables struct { - *moveTablesWorkflow -} - -func (vmt *VtctlMoveTables) Flavor() string { - return "vtctl" -} - -func newVtctlMoveTables(mt *moveTablesWorkflow) *VtctlMoveTables { - return &VtctlMoveTables{mt} -} - -func (vmt *VtctlMoveTables) Create() { - currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - vmt.exec(workflowActionCreate) -} - -func (vmt *VtctlMoveTables) MirrorTraffic() { - // TODO implement me - panic("implement me") -} - -func (vmt *VtctlMoveTables) SwitchReadsAndWrites() { - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) - require.NoError(vmt.vc.t, err) -} - -func (vmt *VtctlMoveTables) ReverseReadsAndWrites() { - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionReverseTraffic, "", "", "", defaultWorkflowExecOptions) - require.NoError(vmt.vc.t, err) -} - -func (vmt *VtctlMoveTables) Show() { - // TODO implement me - panic("implement me") -} - -func (vmt *VtctlMoveTables) Status() { - currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - vmt.exec("Status") -} - -func (vmt *VtctlMoveTables) exec(action string) { - options := &workflowExecOptions{ - deferSecondaryKeys: false, - atomicCopy: vmt.atomicCopy, - } - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, action, vmt.tabletTypes, vmt.sourceShards, "", options) - require.NoError(vmt.vc.t, err) -} -func (vmt *VtctlMoveTables) SwitchReads() { - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionSwitchTraffic, "replica,rdonly", "", "", defaultWorkflowExecOptions) - require.NoError(vmt.vc.t, err) -} - -func (vmt *VtctlMoveTables) SwitchWrites() { - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionSwitchTraffic, "primary", "", "", defaultWorkflowExecOptions) - require.NoError(vmt.vc.t, err) -} -func (vmt *VtctlMoveTables) ReverseReads() { - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionReverseTraffic, "replica,rdonly", "", "", defaultWorkflowExecOptions) - require.NoError(vmt.vc.t, err) -} - -func (vmt *VtctlMoveTables) ReverseWrites() { - err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionReverseTraffic, "primary", "", "", defaultWorkflowExecOptions) - require.NoError(vmt.vc.t, err) -} - -func (vmt *VtctlMoveTables) Cancel() { - vmt.exec(workflowActionCancel) -} - -func (vmt *VtctlMoveTables) Complete() { - vmt.exec(workflowActionComplete) -} - -func (vmt *VtctlMoveTables) GetLastOutput() string { - return vmt.lastOutput -} - -func (vmt *VtctlMoveTables) Start() { - panic("implement 
me") -} - -func (vmt *VtctlMoveTables) Stop() { - panic("implement me") -} - var _ iMoveTables = (*VtctldMoveTables)(nil) type VtctldMoveTables struct { @@ -347,8 +245,6 @@ func newReshard(vc *VitessCluster, rs *reshardWorkflow, flavor workflowFlavor) i flavor = workflowFlavors[rand.IntN(len(workflowFlavors))] } switch flavor { - case workflowFlavorVtctl: - rs2 = newVtctlReshard(rs) case workflowFlavorVtctld: rs2 = newVtctldReshard(rs) default: @@ -358,93 +254,6 @@ func newReshard(vc *VitessCluster, rs *reshardWorkflow, flavor workflowFlavor) i return rs2 } -type VtctlReshard struct { - *reshardWorkflow -} - -func (vrs *VtctlReshard) ReverseReads() { - //TODO implement me - panic("implement me") -} - -func (vrs *VtctlReshard) ReverseWrites() { - //TODO implement me - panic("implement me") -} - -func (vrs *VtctlReshard) Flavor() string { - return "vtctl" -} - -func newVtctlReshard(rs *reshardWorkflow) *VtctlReshard { - return &VtctlReshard{rs} -} - -func (vrs *VtctlReshard) Create() { - currentWorkflowType = binlogdatapb.VReplicationWorkflowType_Reshard - vrs.exec(workflowActionCreate) -} - -func (vrs *VtctlReshard) MirrorTraffic() { - // TODO implement me - panic("implement me") -} - -func (vrs *VtctlReshard) Status() { - currentWorkflowType = binlogdatapb.VReplicationWorkflowType_Reshard - vrs.exec("Status") -} - -func (vrs *VtctlReshard) SwitchReadsAndWrites() { - vrs.exec(workflowActionSwitchTraffic) -} - -func (vrs *VtctlReshard) ReverseReadsAndWrites() { - vrs.exec(workflowActionReverseTraffic) -} - -func (vrs *VtctlReshard) Show() { - // TODO implement me - panic("implement me") -} - -func (vrs *VtctlReshard) exec(action string) { - options := &workflowExecOptions{} - err := tstWorkflowExecVtctl(vrs.vc.t, "", vrs.workflowName, "", vrs.targetKeyspace, - "", action, vrs.tabletTypes, vrs.sourceShards, vrs.targetShards, options) - require.NoError(vrs.vc.t, err) -} - -func (vrs *VtctlReshard) SwitchReads() { - // TODO implement me - panic("implement me") -} - -func (vrs *VtctlReshard) SwitchWrites() { - // TODO implement me - panic("implement me") -} - -func (vrs *VtctlReshard) Cancel() { - vrs.exec(workflowActionCancel) -} - -func (vrs *VtctlReshard) Complete() { - vrs.exec(workflowActionComplete) -} - -func (vrs *VtctlReshard) GetLastOutput() string { - return vrs.lastOutput -} - -func (vrs *VtctlReshard) Start() { - panic("implement me") -} - -func (vrs *VtctlReshard) Stop() { - panic("implement me") -} - var _ iReshard = (*VtctldReshard)(nil) type VtctldReshard struct { diff --git a/go/test/endtoend/vtcombo/recreate/recreate_test.go b/go/test/endtoend/vtcombo/recreate/recreate_test.go index 15cb63c3d7d..496d26c8062 100644 --- a/go/test/endtoend/vtcombo/recreate/recreate_test.go +++ b/go/test/endtoend/vtcombo/recreate/recreate_test.go @@ -132,9 +132,9 @@ func getMySQLConnectionCount(ctx context.Context, session *vtgateconn.VTGateSess } func assertTabletsPresent(t *testing.T) { - tmpCmd := exec.Command("vtctlclient", "--vtctl_client_protocol", "grpc", "--server", grpcAddress, "--stderrthreshold", "0", "ListAllTablets", "--", "test") + tmpCmd := exec.Command("vtctldclient", "--server", grpcAddress, "GetTablets", "--cell", "test") - log.Infof("Running vtctlclient with command: %v", tmpCmd.Args) + log.Infof("Running vtctldclient with command: %v", tmpCmd.Args) output, err := tmpCmd.CombinedOutput() require.Nil(t, err) diff --git a/go/test/endtoend/vtcombo/vttest_sample_test.go b/go/test/endtoend/vtcombo/vttest_sample_test.go index 4895c1195b0..af0decca3d3 100644 --- 
a/go/test/endtoend/vtcombo/vttest_sample_test.go +++ b/go/test/endtoend/vtcombo/vttest_sample_test.go @@ -238,9 +238,9 @@ func insertManyRows(ctx context.Context, t *testing.T, conn *vtgateconn.VTGateCo } func assertTabletsPresent(t *testing.T) { - tmpCmd := exec.Command("vtctlclient", "--vtctl_client_protocol", "grpc", "--server", grpcAddress, "--stderrthreshold", "0", "ListAllTablets", "--", "test") + tmpCmd := exec.Command("vtctldclient", "--server", grpcAddress, "GetTablets", "--cell", "test") - log.Infof("Running vtctlclient with command: %v", tmpCmd.Args) + log.Infof("Running vtctldclient with command: %v", tmpCmd.Args) output, err := tmpCmd.CombinedOutput() require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index e8280b3aa06..4012017b0c3 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -61,6 +61,10 @@ var ( } ]} ` + unsharded2Ks = "uks2" + + //go:embed unsharded2_schema.sql + unsharded2SchemaSQL string ) func TestMain(m *testing.M) { @@ -100,6 +104,17 @@ func TestMain(m *testing.M) { return 1 } + // This keyspace is used to test automatic addition of tables to global routing rules when + // there are multiple unsharded keyspaces. + uKs2 := &cluster.Keyspace{ + Name: unsharded2Ks, + SchemaSQL: unsharded2SchemaSQL, + } + err = clusterInstance.StartUnshardedKeyspace(*uKs2, 0, false) + if err != nil { + return 1 + } + // apply routing rules err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { diff --git a/go/test/endtoend/vtgate/gen4/unsharded2_schema.sql b/go/test/endtoend/vtgate/gen4/unsharded2_schema.sql new file mode 100644 index 00000000000..c3df5bd0a56 --- /dev/null +++ b/go/test/endtoend/vtgate/gen4/unsharded2_schema.sql @@ -0,0 +1,13 @@ +create table u2_a +( + id bigint, + a bigint, + primary key (id) +) Engine = InnoDB; + +create table u2_b +( + id bigint, + b varchar(50), + primary key (id) +) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index bbcb338fa50..55fa139b290 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -814,6 +814,81 @@ func TestDDLTargeted(t *testing.T) { utils.AssertMatches(t, conn, `select id from ddl_targeted`, `[[INT64(1)]]`) } +// TestDynamicConfig tests the dynamic configurations. +func TestDynamicConfig(t *testing.T) { + t.Run("DiscoveryLowReplicationLag", func(t *testing.T) { + // Test initial config value + err := clusterInstance.VtgateProcess.WaitForConfig(`"discovery_low_replication_lag":30000000000`) + require.NoError(t, err) + defer func() { + // Restore default back. + clusterInstance.VtgateProcess.Config.DiscoveryLowReplicationLag = "30s" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + }() + clusterInstance.VtgateProcess.Config.DiscoveryLowReplicationLag = "15s" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + // Test final config value. + err = clusterInstance.VtgateProcess.WaitForConfig(`"discovery_low_replication_lag":"15s"`) + require.NoError(t, err) + }) + + t.Run("DiscoveryHighReplicationLag", func(t *testing.T) { + // Test initial config value + err := clusterInstance.VtgateProcess.WaitForConfig(`"discovery_high_replication_lag":7200000000000`) + require.NoError(t, err) + defer func() { + // Restore default back. 
+ clusterInstance.VtgateProcess.Config.DiscoveryHighReplicationLag = "2h" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + }() + clusterInstance.VtgateProcess.Config.DiscoveryHighReplicationLag = "1h" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + // Test final config value. + err = clusterInstance.VtgateProcess.WaitForConfig(`"discovery_high_replication_lag":"1h"`) + require.NoError(t, err) + }) + + t.Run("DiscoveryMinServingVttablets", func(t *testing.T) { + // Test initial config value + err := clusterInstance.VtgateProcess.WaitForConfig(`"discovery_min_number_serving_vttablets":2`) + require.NoError(t, err) + defer func() { + // Restore default back. + clusterInstance.VtgateProcess.Config.DiscoveryMinServingVttablets = "2" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + }() + clusterInstance.VtgateProcess.Config.DiscoveryMinServingVttablets = "1" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + // Test final config value. + err = clusterInstance.VtgateProcess.WaitForConfig(`"discovery_min_number_serving_vttablets":"1"`) + require.NoError(t, err) + }) + + t.Run("DiscoveryLegacyReplicationLagAlgo", func(t *testing.T) { + // Test initial config value + err := clusterInstance.VtgateProcess.WaitForConfig(`"discovery_legacy_replication_lag_algorithm":""`) + require.NoError(t, err) + defer func() { + // Restore default back. + clusterInstance.VtgateProcess.Config.DiscoveryLegacyReplicationLagAlgo = "true" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + }() + clusterInstance.VtgateProcess.Config.DiscoveryLegacyReplicationLagAlgo = "false" + err = clusterInstance.VtgateProcess.RewriteConfiguration() + require.NoError(t, err) + // Test final config value. + err = clusterInstance.VtgateProcess.WaitForConfig(`"discovery_legacy_replication_lag_algorithm":"false"`) + require.NoError(t, err) + }) +} + func TestLookupErrorMetric(t *testing.T) { conn, closer := start(t) defer closer() diff --git a/go/test/endtoend/vtgate/plan_tests/main_test.go b/go/test/endtoend/vtgate/plan_tests/main_test.go index 504ec3ffb26..2dc2e70120b 100644 --- a/go/test/endtoend/vtgate/plan_tests/main_test.go +++ b/go/test/endtoend/vtgate/plan_tests/main_test.go @@ -22,6 +22,7 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -86,7 +87,7 @@ func TestMain(m *testing.M) { // TODO: (@GuptaManan100/@systay): Also run the tests with normalizer on. clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--normalize_queries=false", - "--schema_change_signal=false", + "--schema_change_signal=true", ) // Start vtgate @@ -178,7 +179,7 @@ func verifyTestExpectations(t *testing.T, pd engine.PrimitiveDescription, test p // 1. Verify that the Join primitive sees atleast 1 row on the left side. 
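The switch from require.NotZero to assert.NotZero in the walk below is deliberate: assert records the failure and lets WalkPrimitiveDescription keep visiting nodes, while require would abort the test on the first zero. A minimal test-file sketch of that testify distinction:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNonFatalChecks(t *testing.T) {
	for _, rows := range []int{1, 0, 2} {
		// assert flags rows == 0 but the loop still visits every element;
		// require.NotZero would stop the test at the zero value.
		assert.NotZero(t, rows)
	}
}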
engine.WalkPrimitiveDescription(pd, func(description engine.PrimitiveDescription) { if description.OperatorType == "Join" { - require.NotZero(t, description.Inputs[0].RowsReceived[0]) + assert.NotZero(t, description.Inputs[0].RowsReceived[0]) } }) diff --git a/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go b/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go index 5c5447fe6b6..0068616c3b8 100644 --- a/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go +++ b/go/test/endtoend/vtgate/plan_tests/plan_e2e_test.go @@ -19,11 +19,22 @@ package plan_tests import ( "testing" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/sqlparser" ) func TestE2ECases(t *testing.T) { - e2eTestCaseFiles := []string{"select_cases.json", "filter_cases.json", "dml_cases.json"} + err := utils.WaitForAuthoritative(t, "main", "source_of_ref", clusterInstance.VtgateProcess.ReadVSchema) + require.NoError(t, err) + + e2eTestCaseFiles := []string{ + "select_cases.json", + "filter_cases.json", + "dml_cases.json", + "reference_cases.json", + } mcmp, closer := start(t) defer closer() loadSampleData(t, mcmp) @@ -34,7 +45,11 @@ func TestE2ECases(t *testing.T) { if test.SkipE2E { mcmp.AsT().Skip(test.Query) } - mcmp.Exec(test.Query) + stmt, err := sqlparser.NewTestParser().Parse(test.Query) + require.NoError(mcmp.AsT(), err) + sqlparser.RemoveKeyspaceIgnoreSysSchema(stmt) + + mcmp.ExecVitessAndMySQLDifferentQueries(test.Query, sqlparser.String(stmt)) pd := utils.ExecTrace(mcmp.AsT(), mcmp.VtConn, test.Query) verifyTestExpectations(mcmp.AsT(), pd, test) if mcmp.VtConn.IsClosed() { diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index ee9be542634..536dfa7500a 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -95,7 +95,7 @@ func TestMain(m *testing.M) { vtParams = clusterInstance.GetVTParams(keyspaceName) // create mysql instance and connection parameters - conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL) + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL, uschemaSQL) if err != nil { fmt.Println(err) return 1 diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 62b859ab1d3..7ab0fe7ef54 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -25,6 +25,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/sqlparser" _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/assert" @@ -163,6 +164,8 @@ func TestSetAndGetLastInsertID(t *testing.T) { "update t1 set id2 = last_insert_id(%d) where id1 = 2", "update t1 set id2 = 88 where id1 = last_insert_id(%d)", "delete from t1 where id1 = last_insert_id(%d)", + "select id2, last_insert_id(count(*)) from t1 where %d group by id2", + "set @x = last_insert_id(%d)", } for _, workload := range []string{"olap", "oltp"} { @@ -175,7 +178,7 @@ func TestSetAndGetLastInsertID(t *testing.T) { require.NoError(t, err) } - // Insert a row for UPDATE tests + // Insert a few rows for UPDATE tests mcmp.Exec("insert into t1 (id1, id2) values (1, 10)") for _, query := range queries { @@ -186,6 +189,96 @@ func TestSetAndGetLastInsertID(t *testing.T) { } } +func TestSetAndGetLastInsertIDWithInsertUnsharded(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + tests := []string{ + 
"insert into uks.unsharded(id1, id2) values (last_insert_id(%d),12)", + "insert into uks.unsharded(id1, id2) select last_insert_id(%d), 453", + } + + i := 0 + getVal := func() int { + defer func() { i++ }() + return i + } + + runTests := func(mcmp *utils.MySQLCompare) { + for _, test := range tests { + + lastInsertID := getVal() + query := fmt.Sprintf(test, lastInsertID) + + stmt, err := sqlparser.NewTestParser().Parse(query) + require.NoError(mcmp.AsT(), err) + sqlparser.RemoveKeyspaceIgnoreSysSchema(stmt) + + mcmp.ExecVitessAndMySQLDifferentQueries(query, sqlparser.String(stmt)) + mcmp.Exec("select last_insert_id()") + } + } + + for _, workload := range []string{"olap", "oltp"} { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { + _, err := mcmp.VtConn.ExecuteFetch("set workload = "+workload, 1, false) + require.NoError(t, err) + runTests(mcmp) + + // run the queries again, but inside a transaction this time + mcmp.Exec("begin") + runTests(mcmp) + mcmp.Exec("commit") + }) + } + + // Now test to set the last insert id to 0, see that it has changed correctly even if the value is 0 + mcmp.ExecVitessAndMySQLDifferentQueries( + "insert into uks.unsharded(id1, id2) values (last_insert_id(0),12)", + "insert into unsharded(id1, id2) values (last_insert_id(0),12)", + ) + mcmp.Exec("select last_insert_id()") +} + +func TestSetAndGetLastInsertIDWithInsert(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + tests := []string{ + "insert into t1(id1, id2) values (last_insert_id(%d) ,%d)", + "insert into t1(id1, id2) values (%d, last_insert_id(%d))", + "insert into t1(id1, id2) select last_insert_id(%d), %d", + "insert into t1(id1, id2) select last_insert_id(id1+%d), 12 from t1 where 1 > %d", + } + + i := 0 + getVal := func() int { + defer func() { i++ }() + return i + } + + runTests := func(mcmp *utils.MySQLCompare) { + for _, test := range tests { + query := fmt.Sprintf(test, getVal(), getVal()) + mcmp.Exec(query) + mcmp.Exec("select last_insert_id()") + } + } + + for _, workload := range []string{"olap", "oltp"} { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { + _, err := mcmp.VtConn.ExecuteFetch("set workload = "+workload, 1, false) + require.NoError(t, err) + runTests(mcmp) + + // run the queries again, but inside a transaction this time + mcmp.Exec("begin") + runTests(mcmp) + mcmp.Exec("commit") + }) + } +} + // TestVindexHints tests that vindex hints work as intended. func TestVindexHints(t *testing.T) { mcmp, closer := start(t) @@ -574,3 +667,23 @@ func TestTimeZones(t *testing.T) { }) } } + +// TestSemiJoin tests that the semi join works as intended. 
+func TestSemiJoin(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + for i := 1; i <= 1000; i++ { + mcmp.Exec(fmt.Sprintf("insert into t1(id1, id2) values (%d, %d)", i, 2*i)) + mcmp.Exec(fmt.Sprintf("insert into tbl(id, unq_col, nonunq_col) values (%d, %d, %d)", i, 2*i, 3*i)) + } + + // Test that the semi join works as intended + for _, mode := range []string{"oltp", "olap"} { + mcmp.Run(mode, func(mcmp *utils.MySQLCompare) { + utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode)) + + mcmp.Exec("select id1, id2 from t1 where exists (select id from tbl where nonunq_col = t1.id2) order by id1") + }) + } +} diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 3fe43fa8f8f..22091e5cce5 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -139,7 +139,7 @@ func TestAPIEndpoints(t *testing.T) { }) t.Run("Replication Analysis API", func(t *testing.T) { - // use vtctlclient to stop replication + // use vtctldclient to stop replication _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index a4ed71945be..4a2f50b4168 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -216,7 +216,7 @@ func TestVTOrcRepairs(t *testing.T) { }) t.Run("StopReplication", func(t *testing.T) { - // use vtctlclient to stop replication + // use vtctldclient to stop replication _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 9017d35a8c5..de60420eee3 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/logic" ) @@ -44,7 +45,7 @@ func TestDownPrimary(t *testing.T) { // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, - }, 1, "semi_sync") + }, 1, policy.DurabilitySemiSync) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo @@ -115,7 +116,7 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. 
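+// It checks that VTOrc can repair a failure that happened before VTOrc itself came online.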
func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, policy.DurabilityNone) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] curPrimary := shard0.Vttablets[0] @@ -170,7 +171,7 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, policy.DurabilityNone) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo @@ -199,7 +200,7 @@ func TestDeletedPrimaryTablet(t *testing.T) { // Disable VTOrc recoveries vtOrcProcess.DisableGlobalRecoveries(t) - // use vtctlclient to stop replication on the replica + // use vtctldclient to stop replication on the replica _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // insert a write that is not available on the replica. @@ -209,7 +210,7 @@ func TestDeletedPrimaryTablet(t *testing.T) { _ = curPrimary.VttabletProcess.TearDown() err = curPrimary.MysqlctlProcess.Stop() require.NoError(t, err) - // use vtctlclient to start replication on the replica back + // use vtctldclient to start replication back on the replica _, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StartReplication", replica.Alias) require.NoError(t, err) err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", curPrimary.Alias) @@ -241,7 +242,7 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test.
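+ // With policy.DurabilitySemiSync every write needs one replica acknowledgement, so at least one replica is guaranteed to hold every acknowledged write when the primary dies.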
utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossCellFailover: true, - }, 1, "semi_sync") + }, 1, policy.DurabilitySemiSync) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // find primary from topo diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go index 823655ed785..419a2e843c3 100644 --- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go +++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go @@ -165,7 +165,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Empty(t, replicaInstance.LastSQLError) assert.EqualValues(t, 0, replicaInstance.SQLDelay) assert.True(t, replicaInstance.UsingOracleGTID) - assert.False(t, replicaInstance.UsingMariaDBGTID) assert.Equal(t, replicaInstance.SourceUUID, primaryInstance.ServerUUID) assert.False(t, replicaInstance.HasReplicationFilters) assert.LessOrEqual(t, int(replicaInstance.SecondsBehindPrimary.Int64), 1) diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 456d55518dd..0a2d516fe63 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -40,6 +40,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" // Register topo implementations. _ "vitess.io/vitess/go/vt/topo/consultopo" @@ -299,7 +300,7 @@ func SetupVttabletsAndVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo, numRep } if durability == "" { - durability = "none" + durability = policy.DurabilityNone } out, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) require.NoError(t, err, out) diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index 9592440196a..7814be8ca83 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -28,10 +28,9 @@ import ( ) var ( - configKey = viperutil.KeyPrefixFunc("discovery") // lowReplicationLag defines the duration that replication lag is low enough that the VTTablet is considered healthy. 
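+ // The keys passed to viperutil.Configure below are now spelled out in full, matching their flag names, instead of being derived from the removed "discovery" key prefix.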
lowReplicationLag = viperutil.Configure( - configKey("low_replication_lag"), + "discovery_low_replication_lag", viperutil.Options[time.Duration]{ FlagName: "discovery_low_replication_lag", Default: 30 * time.Second, @@ -39,7 +38,7 @@ var ( }, ) highReplicationLagMinServing = viperutil.Configure( - configKey("high_replication_lag"), + "discovery_high_replication_lag", viperutil.Options[time.Duration]{ FlagName: "discovery_high_replication_lag_minimum_serving", Default: 2 * time.Hour, @@ -47,7 +46,7 @@ var ( }, ) minNumTablets = viperutil.Configure( - configKey("min_number_serving_vttablets"), + "discovery_min_number_serving_vttablets", viperutil.Options[int]{ FlagName: "min_number_serving_vttablets", Default: 2, @@ -55,7 +54,7 @@ var ( }, ) legacyReplicationLagAlgorithm = viperutil.Configure( - configKey("legacy_replication_lag_algorithm"), + "discovery_legacy_replication_lag_algorithm", viperutil.Options[bool]{ FlagName: "legacy_replication_lag_algorithm", Default: true, diff --git a/go/vt/schemadiff/key.go b/go/vt/schemadiff/key.go index 865073a5a98..97f3af1630c 100644 --- a/go/vt/schemadiff/key.go +++ b/go/vt/schemadiff/key.go @@ -68,6 +68,16 @@ func (i *IndexDefinitionEntity) IsUnique() bool { return i.IndexDefinition.Info.IsUnique() } +// HasExpression returns true if the index uses an expression, e.g. `KEY idx1 ((id + 1))`. +func (i *IndexDefinitionEntity) HasExpression() bool { + for _, col := range i.IndexDefinition.Columns { + if col.Expression != nil { + return true + } + } + return false +} + // HasNullable returns true if any of the columns in the index are nullable. func (i *IndexDefinitionEntity) HasNullable() bool { for _, col := range i.ColumnList.Entities { diff --git a/go/vt/schemadiff/onlineddl.go b/go/vt/schemadiff/onlineddl.go index f02ccb1224d..06f3384c8fd 100644 --- a/go/vt/schemadiff/onlineddl.go +++ b/go/vt/schemadiff/onlineddl.go @@ -162,6 +162,11 @@ func PrioritizedUniqueKeys(createTableEntity *CreateTableEntity) *IndexDefinitio if !key.IsUnique() { continue } + if key.HasExpression() { + // If the key has an expression, it does not reliably cover its columns, + // so we cannot trust it.
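+ // For instance, a UNIQUE KEY over ((id + 1)) says nothing about the uniqueness of the id column itself.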
+ continue + } uniqueKeys = append(uniqueKeys, key) } sort.SliceStable(uniqueKeys, func(i, j int) bool { diff --git a/go/vt/schemadiff/onlineddl_test.go b/go/vt/schemadiff/onlineddl_test.go index 834490dca1b..f5309b4f943 100644 --- a/go/vt/schemadiff/onlineddl_test.go +++ b/go/vt/schemadiff/onlineddl_test.go @@ -932,6 +932,11 @@ func TestRevertible(t *testing.T) { toSchema: `id int primary key, e1 set('a', 'b'), e2 set('a'), e3 set('a', 'b', 'c'), e4 set('a', 'x'), e5 set('a', 'x', 'b'), e6 set('b'), e7 varchar(1), e8 tinyint`, expandedColumnNames: `e3,e4,e5,e6,e7,e8`, }, + { + name: "index with expression", + fromSchema: "id int, primary key (id), key idx1 ((id + 1))", + toSchema: "id int, primary key (id), key idx2 ((id + 2))", + }, } var ( diff --git a/go/vt/schemadiff/table.go b/go/vt/schemadiff/table.go index e002ef18e15..e9bb35cb3bd 100644 --- a/go/vt/schemadiff/table.go +++ b/go/vt/schemadiff/table.go @@ -500,9 +500,14 @@ func (c *CreateTableEntity) IndexDefinitionEntities() []*IndexDefinitionEntity { keys := c.CreateTable.TableSpec.Indexes entities := make([]*IndexDefinitionEntity, len(keys)) for i, key := range keys { - colEntities := make([]*ColumnDefinitionEntity, len(key.Columns)) - for i, keyCol := range key.Columns { - colEntities[i] = colMap[keyCol.Column.Lowered()] + colEntities := []*ColumnDefinitionEntity{} + for _, keyCol := range key.Columns { + colEntity, ok := colMap[keyCol.Column.Lowered()] + if !ok { + // This can happen if the index is on an expression, e.g. `KEY idx1 ((id + 1))`. + continue + } + colEntities = append(colEntities, colEntity) } entities[i] = NewIndexDefinitionEntity(c.Env, key, NewColumnDefinitionEntityList(colEntities)) } diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go index 84c40d769c2..ac871dbd4af 100644 --- a/go/vt/schemadiff/table_test.go +++ b/go/vt/schemadiff/table_test.go @@ -891,6 +891,18 @@ func TestCreateTableDiff(t *testing.T) { "+ KEY `i_idx` (`i`) INVISIBLE", }, }, + { + name: "keys with expression", + from: "create table t1 (id int, primary key (id), key idx1 ((id + 1)))", + to: "create table t1 (id int, primary key (id), key idx2 ((id + 2)))", + diff: "alter table t1 drop key idx1, add key idx2 ((id + 2))", + cdiff: "ALTER TABLE `t1` DROP KEY `idx1`, ADD KEY `idx2` ((`id` + 2))", + textdiffs: []string{ + "- KEY `idx1` ((`id` + 1))", + "+ KEY `idx2` ((`id` + 2))", + }, + }, + // FULLTEXT keys { name: "add one fulltext key", @@ -2564,6 +2576,12 @@ func TestValidate(t *testing.T) { alter: "alter table t engine=innodb", expectErr: &DuplicateKeyNameError{Table: "t", Key: "PRIMARY"}, }, + { + name: "key with expression", + from: "create table t (id int, primary key (id), key idx1 ((id + 1)))", + alter: "alter table t add key idx2 ((id + 2))", + to: "create table t (id int, primary key (id), key idx1 ((id + 1)), key idx2 ((id + 2)))", + }, // partitions { name: "drop column used by partitions", diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index 836c824010d..2891d532d16 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -2440,6 +2440,25 @@ func RemoveKeyspace(in SQLNode) { }) } +// RemoveKeyspaceIgnoreSysSchema removes the Qualifier.Qualifier on all ColNames and Qualifier on all TableNames in the AST +// except for the system schema. 
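+// For example, "select 1 from uks.unsharded" becomes "select 1 from unsharded", while system-schema qualifiers such as information_schema are preserved.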
+func RemoveKeyspaceIgnoreSysSchema(in SQLNode) { + Rewrite(in, nil, func(cursor *Cursor) bool { + switch expr := cursor.Node().(type) { + case *ColName: + if expr.Qualifier.Qualifier.NotEmpty() && !SystemSchema(expr.Qualifier.Qualifier.String()) { + expr.Qualifier.Qualifier = NewIdentifierCS("") + } + case TableName: + if expr.Qualifier.NotEmpty() && !SystemSchema(expr.Qualifier.String()) { + expr.Qualifier = NewIdentifierCS("") + cursor.Replace(expr) + } + } + return true + }) +} + func convertStringToInt(integer string) int { val, _ := strconv.Atoi(integer) return val diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index f01b47cbd7b..c1484df7cc4 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -917,3 +917,11 @@ func TestCloneComments(t *testing.T) { assert.Equal(t, "b", val) } } + +func TestRemoveKeyspace(t *testing.T) { + stmt, err := NewTestParser().Parse("select 1 from uks.unsharded") + require.NoError(t, err) + RemoveKeyspaceIgnoreSysSchema(stmt) + + require.Equal(t, "select 1 from unsharded", String(stmt)) +} diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index c3dc22d21b4..706e0bec92a 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -71,6 +71,7 @@ import ( "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/topotools/events" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vtenv" @@ -668,7 +669,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } @@ -698,7 +699,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch // Since we want to check the durability rules for the desired state and not before we make that change expectedTablet := tablet.Tablet.CloneVT() expectedTablet.Type = req.DbType - err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, expectedTablet)) + err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, expectedTablet)) if err != nil { return nil, err } @@ -2776,7 +2777,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( return err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err } @@ -2860,7 +2861,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( // position logger.Infof("initializing primary on %v", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) event.DispatchUpdate(ev, "initializing primary") - rp, err := tmc.InitPrimary(ctx, primaryElectTabletInfo.Tablet, reparentutil.SemiSyncAckers(durability, primaryElectTabletInfo.Tablet) > 0) + rp, err := tmc.InitPrimary(ctx, primaryElectTabletInfo.Tablet, policy.SemiSyncAckers(durability, primaryElectTabletInfo.Tablet) > 0) if err != nil { return err } @@ -2901,7 +2902,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( go func(alias string, tabletInfo *topo.TabletInfo) { defer wgReplicas.Done() 
logger.Infof("initializing replica %v", alias) - if err := tmc.InitReplica(replCtx, tabletInfo.Tablet, req.PrimaryElectTabletAlias, rp, now, reparentutil.IsReplicaSemiSync(durability, primaryElectTabletInfo.Tablet, tabletInfo.Tablet)); err != nil { + if err := tmc.InitReplica(replCtx, tabletInfo.Tablet, req.PrimaryElectTabletAlias, rp, now, policy.IsReplicaSemiSync(durability, primaryElectTabletInfo.Tablet, tabletInfo.Tablet)); err != nil { rec.RecordError(fmt.Errorf("tablet %v InitReplica failed: %v", alias, err)) } }(alias, tabletInfo) @@ -3598,12 +3599,12 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } - if err = s.tmc.SetReplicationSource(ctx, tablet.Tablet, shard.PrimaryAlias, 0, "", false, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet), 0); err != nil { + if err = s.tmc.SetReplicationSource(ctx, tablet.Tablet, shard.PrimaryAlias, 0, "", false, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet), 0); err != nil { return nil, err } @@ -3787,7 +3788,7 @@ func (s *VtctldServer) SetKeyspaceDurabilityPolicy(ctx context.Context, req *vtc return nil, err } - policyValid := reparentutil.CheckDurabilityPolicyExists(req.DurabilityPolicy) + policyValid := policy.CheckDurabilityPolicyExists(req.DurabilityPolicy) if !policyValid { err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "durability policy <%v> is not a valid policy. Please register it as a policy first", req.DurabilityPolicy) return nil, err @@ -4308,12 +4309,12 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } - if err = s.tmc.StartReplication(ctx, tablet.Tablet, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet)); err != nil { + if err = s.tmc.StartReplication(ctx, tablet.Tablet, policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet)); err != nil { log.Errorf("StartReplication: failed to start replication on %v: %v", alias, err) return nil, err } @@ -4413,12 +4414,12 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct return nil, err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return nil, err } - if err = s.tmc.ChangeType(ctx, tablet.Tablet, topodatapb.TabletType_PRIMARY, reparentutil.SemiSyncAckers(durability, tablet.Tablet) > 0); err != nil { + if err = s.tmc.ChangeType(ctx, tablet.Tablet, topodatapb.TabletType_PRIMARY, policy.SemiSyncAckers(durability, tablet.Tablet) > 0); err != nil { log.Warningf("ChangeType(%v, PRIMARY): %v", topoproto.TabletAliasString(req.Tablet), err) return nil, err } diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 93f302a1097..31220918211 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -30,6 +30,7 @@ import ( _flag 
"vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2777,14 +2778,14 @@ func TestCreateKeyspace(t *testing.T) { req: &vtctldatapb.CreateKeyspaceRequest{ Name: "testkeyspace", Type: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, }, expected: &vtctldatapb.CreateKeyspaceResponse{ Keyspace: &vtctldatapb.Keyspace{ Name: "testkeyspace", Keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, }, }, }, @@ -11338,11 +11339,11 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { }, req: &vtctldatapb.SetKeyspaceDurabilityPolicyRequest{ Keyspace: "ks1", - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, }, expected: &vtctldatapb.SetKeyspaceDurabilityPolicyResponse{ Keyspace: &topodatapb.Keyspace{ - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, }, }, }, @@ -13881,11 +13882,11 @@ func TestValidateSchemaKeyspace(t *testing.T) { } tests := []*struct { - name string - req *vtctldatapb.ValidateSchemaKeyspaceRequest - expected *vtctldatapb.ValidateSchemaKeyspaceResponse - setup func() - shouldErr bool + name string + req *vtctldatapb.ValidateSchemaKeyspaceRequest + expected *vtctldatapb.ValidateSchemaKeyspaceResponse + setup func() + err string }{ { name: "valid schemas", @@ -13908,7 +13909,6 @@ func TestValidateSchemaKeyspace(t *testing.T) { Uid: 101, }, schema1) }, - shouldErr: false, }, { name: "different schemas", @@ -13931,7 +13931,6 @@ func TestValidateSchemaKeyspace(t *testing.T) { Uid: 101, }, schema2) }, - shouldErr: false, }, { name: "skip-no-primary: no primary", @@ -13956,7 +13955,6 @@ func TestValidateSchemaKeyspace(t *testing.T) { Uid: 201, }, schema1) }, - shouldErr: false, }, } @@ -13964,8 +13962,8 @@ func TestValidateSchemaKeyspace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { tt.setup() resp, err := vtctld.ValidateSchemaKeyspace(ctx, tt.req) - if tt.shouldErr { - assert.Error(t, err) + if tt.err != "" { + assert.EqualError(t, err, tt.err) return } diff --git a/go/vt/vtctl/reparentutil/durability_funcs.go b/go/vt/vtctl/reparentutil/durability_funcs.go index 63e123a685d..da53c1a3e15 100644 --- a/go/vt/vtctl/reparentutil/durability_funcs.go +++ b/go/vt/vtctl/reparentutil/durability_funcs.go @@ -19,16 +19,17 @@ package reparentutil import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) // SemiSyncAckersForPrimary returns the list of tablets which are capable of sending Semi-Sync Acks for the given primary tablet -func SemiSyncAckersForPrimary(durability Durabler, primary *topodatapb.Tablet, allTablets []*topodatapb.Tablet) (semiSyncAckers []*topodatapb.Tablet) { +func SemiSyncAckersForPrimary(durability policy.Durabler, primary *topodatapb.Tablet, allTablets []*topodatapb.Tablet) (semiSyncAckers []*topodatapb.Tablet) { for _, tablet := range allTablets { if topoproto.TabletAliasEqual(primary.Alias, tablet.Alias) { continue } - if IsReplicaSemiSync(durability, primary, tablet) { + if policy.IsReplicaSemiSync(durability, primary, tablet) { semiSyncAckers = append(semiSyncAckers, tablet) } } @@ -37,7 +38,7 @@ func SemiSyncAckersForPrimary(durability Durabler, primary 
*topodatapb.Tablet, a // haveRevokedForTablet checks whether we have reached enough tablets such that the given primary eligible tablet cannot accept any new writes // The tablets reached should have their replication stopped and must be set to read only. -func haveRevokedForTablet(durability Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { +func haveRevokedForTablet(durability policy.Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { // if we have reached the primaryEligible tablet and stopped its replication and marked it read only, then it will not // accept any new writes if topoproto.IsTabletInList(primaryEligible, tabletsReached) { @@ -51,7 +52,7 @@ func haveRevokedForTablet(durability Durabler, primaryEligible *topodatapb.Table allSemiSyncAckers := SemiSyncAckersForPrimary(durability, primaryEligible, allTablets) // numOfSemiSyncAcksRequired is the number of semi sync Acks that the primaryEligible tablet requires - numOfSemiSyncAcksRequired := SemiSyncAckers(durability, primaryEligible) + numOfSemiSyncAcksRequired := policy.SemiSyncAckers(durability, primaryEligible) // if we have reached enough semi-sync Acking tablets such that the primaryEligible cannot accept a write // we have revoked from the tablet @@ -61,9 +62,9 @@ func haveRevokedForTablet(durability Durabler, primaryEligible *topodatapb.Table // haveRevoked checks whether we have reached enough tablets to guarantee that no tablet eligible to become a primary can accept any write // All the tablets reached must have their replication stopped and set to read only for us to guarantee that we have revoked access // from all the primary eligible tablets (prevent them from accepting any new writes) -func haveRevoked(durability Durabler, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { +func haveRevoked(durability policy.Durabler, tabletsReached []*topodatapb.Tablet, allTablets []*topodatapb.Tablet) bool { for _, tablet := range allTablets { - if PromotionRule(durability, tablet) == promotionrule.MustNot { + if policy.PromotionRule(durability, tablet) == promotionrule.MustNot { continue } if !haveRevokedForTablet(durability, tablet, tabletsReached, allTablets) { @@ -74,7 +75,7 @@ func haveRevoked(durability Durabler, tabletsReached []*topodatapb.Tablet, allTa } // canEstablishForTablet checks whether we have reached enough tablets to say that the given primary eligible tablet will be able to accept new writes -func canEstablishForTablet(durability Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet) bool { +func canEstablishForTablet(durability policy.Durabler, primaryEligible *topodatapb.Tablet, tabletsReached []*topodatapb.Tablet) bool { // if we have not reached the primaryEligible tablet, then it cannot be considered eligible to accept writes // since it might have been stopped if !topoproto.IsTabletInList(primaryEligible, tabletsReached) { @@ -85,7 +86,7 @@ func canEstablishForTablet(durability Durabler, primaryEligible *topodatapb.Tabl semiSyncAckersReached := SemiSyncAckersForPrimary(durability, primaryEligible, tabletsReached) // numOfSemiSyncAcksRequired is the number of semi sync Acks that the primaryEligible tablet requires - numOfSemiSyncAcksRequired := SemiSyncAckers(durability, primaryEligible) + numOfSemiSyncAcksRequired := policy.SemiSyncAckers(durability, primaryEligible) // if we have reached enough semi-sync 
Acking tablets such that the primaryEligible can accept a write // we can safely promote this tablet diff --git a/go/vt/vtctl/reparentutil/durability_funcs_test.go b/go/vt/vtctl/reparentutil/durability_funcs_test.go index 21eb308a4b0..737d3e40346 100644 --- a/go/vt/vtctl/reparentutil/durability_funcs_test.go +++ b/go/vt/vtctl/reparentutil/durability_funcs_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) var ( @@ -73,25 +74,25 @@ func TestSemiSyncAckersForPrimary(t *testing.T) { }{ { name: "no other tablets", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet}, wantSemiSyncAckers: nil, }, { name: "'none' durability policy", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: nil, }, { name: "'semi_sync' durability policy", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet}, }, { name: "'cross_cell' durability policy", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, primary: primaryTablet, allTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, replicaCrossCellTablet, rdonlyCrossCellTablet}, wantSemiSyncAckers: []*topodatapb.Tablet{replicaCrossCellTablet}, @@ -99,7 +100,7 @@ func TestSemiSyncAckersForPrimary(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err, "error setting durability policy") semiSyncAckers := SemiSyncAckersForPrimary(durability, tt.primary, tt.allTablets) require.Equal(t, tt.wantSemiSyncAckers, semiSyncAckers) @@ -118,7 +119,7 @@ func Test_haveRevokedForTablet(t *testing.T) { }{ { name: "'none' durability policy - not revoked", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -129,7 +130,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'none' durability policy - revoked", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -140,7 +141,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -151,7 +152,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - not revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: 
policy.DurabilitySemiSync, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -162,7 +163,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -173,7 +174,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - not revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -184,7 +185,7 @@ func Test_haveRevokedForTablet(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - primary in list", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, allTablets: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -197,7 +198,7 @@ func Test_haveRevokedForTablet(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err) out := haveRevokedForTablet(durability, tt.primaryEligible, tt.tabletsReached, tt.allTablets) require.Equal(t, tt.revoked, out) @@ -215,7 +216,7 @@ func Test_haveRevoked(t *testing.T) { }{ { name: "'none' durability policy - all tablets revoked", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -225,7 +226,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - all tablets revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -235,7 +236,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - all tablets revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -245,7 +246,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'none' durability policy - revoked", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, replicaCrossCellTablet, }, @@ -255,7 +256,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'semi_sync' durability policy - revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ replicaTablet, replicaCrossCellTablet, rdonlyTablet, }, @@ -265,7 +266,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'cross_cell' durability policy - revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: 
policy.DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ replicaCrossCellTablet, }, @@ -275,7 +276,7 @@ func Test_haveRevoked(t *testing.T) { revoked: true, }, { name: "'none' durability policy - not revoked", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -285,7 +286,7 @@ func Test_haveRevoked(t *testing.T) { revoked: false, }, { name: "'semi_sync' durability policy - not revoked", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -295,7 +296,7 @@ func Test_haveRevoked(t *testing.T) { revoked: false, }, { name: "'cross_cell' durability policy - not revoked", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, }, @@ -307,7 +308,7 @@ func Test_haveRevoked(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err) out := haveRevoked(durability, tt.tabletsReached, tt.allTablets) require.Equal(t, tt.revoked, out) @@ -325,7 +326,7 @@ func Test_canEstablishForTablet(t *testing.T) { }{ { name: "primary not reached", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ replicaTablet, replicaCrossCellTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -333,7 +334,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "not established", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -341,7 +342,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "not established", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, rdonlyCrossCellTablet, rdonlyTablet, @@ -349,7 +350,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: false, }, { name: "established", - durabilityPolicy: "none", + durabilityPolicy: policy.DurabilityNone, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, @@ -357,7 +358,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: true, }, { name: "established", - durabilityPolicy: "semi_sync", + durabilityPolicy: policy.DurabilitySemiSync, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaTablet, @@ -365,7 +366,7 @@ func Test_canEstablishForTablet(t *testing.T) { canEstablish: true, }, { name: "established", - durabilityPolicy: "cross_cell", + durabilityPolicy: policy.DurabilityCrossCell, primaryEligible: primaryTablet, tabletsReached: []*topodatapb.Tablet{ primaryTablet, replicaCrossCellTablet, @@ -375,7 +376,7 @@ func Test_canEstablishForTablet(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("'%s' durability policy - %s", tt.durabilityPolicy, tt.name), func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + durability, err := 
policy.GetDurabilityPolicy(tt.durabilityPolicy) require.NoError(t, err) require.Equalf(t, tt.canEstablish, canEstablishForTablet(durability, tt.primaryEligible, tt.tabletsReached), "canEstablishForTablet(%v, %v)", tt.primaryEligible, tt.tabletsReached) }) diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 70faf8958c7..5f7d3140c7b 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/event" "vitess.io/vitess/go/sets" @@ -66,7 +67,7 @@ type EmergencyReparentOptions struct { // Private options managed internally. We use value passing to avoid leaking // these details back out. lockAction string - durability Durabler + durability policy.Durabler } // counters for Emergency Reparent Shard @@ -181,7 +182,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve } erp.logger.Infof("Getting a new durability policy for %v", keyspaceDurability) - opts.durability, err = GetDurabilityPolicy(keyspaceDurability) + opts.durability, err = policy.GetDurabilityPolicy(keyspaceDurability) if err != nil { return err } @@ -539,11 +540,11 @@ func (erp *EmergencyReparenter) reparentReplicas( if ev.ShardInfo.PrimaryAlias == nil { erp.logger.Infof("setting up %v as new primary for an uninitialized cluster", alias) // we call InitPrimary when the PrimaryAlias in the ShardInfo is empty. This happens when we have an uninitialized cluster. - position, err = erp.tmc.InitPrimary(primaryCtx, tablet, SemiSyncAckers(opts.durability, tablet) > 0) + position, err = erp.tmc.InitPrimary(primaryCtx, tablet, policy.SemiSyncAckers(opts.durability, tablet) > 0) } else { erp.logger.Infof("starting promotion for the new primary - %v", alias) // we call PromoteReplica which changes the tablet type, fixes the semi-sync, set the primary to read-write and flushes the binlogs - position, err = erp.tmc.PromoteReplica(primaryCtx, tablet, SemiSyncAckers(opts.durability, tablet) > 0) + position, err = erp.tmc.PromoteReplica(primaryCtx, tablet, policy.SemiSyncAckers(opts.durability, tablet) > 0) } if err != nil { return vterrors.Wrapf(err, "primary-elect tablet %v failed to be upgraded to primary: %v", alias, err) @@ -574,7 +575,7 @@ func (erp *EmergencyReparenter) reparentReplicas( forceStart = fs } - err := erp.tmc.SetReplicationSource(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart, IsReplicaSemiSync(opts.durability, newPrimaryTablet, ti.Tablet), 0) + err := erp.tmc.SetReplicationSource(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart, policy.IsReplicaSemiSync(opts.durability, newPrimaryTablet, ti.Tablet), 0) if err != nil { err = vterrors.Wrapf(err, "tablet %v SetReplicationSource failed: %v", alias, err) rec.RecordError(err) @@ -746,7 +747,7 @@ func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb for _, tablet := range validTablets { tabletAliasStr := topoproto.TabletAliasString(tablet.Alias) // Remove tablets which have MustNot promote rule since they must never be promoted - if PromotionRule(opts.durability, tablet) == promotionrule.MustNot { + if policy.PromotionRule(opts.durability, tablet) == promotionrule.MustNot { erp.logger.Infof("Removing %s from list of valid candidates for promotion because it has the Must Not promote rule", tabletAliasStr) if 
opts.NewPrimaryAlias != nil && topoproto.TabletAliasEqual(opts.NewPrimaryAlias, tablet.Alias) { return nil, vterrors.Errorf(vtrpc.Code_ABORTED, "proposed primary %s has a must not promotion rule", topoproto.TabletAliasString(opts.NewPrimaryAlias)) diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index 3669c34dc11..840df41d6e2 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql/replication" logutilpb "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/mysql" @@ -129,7 +130,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ { name: "success", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -238,7 +239,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "success - 1 replica and 1 rdonly failure", - durability: "semi_sync", + durability: policy.DurabilitySemiSync, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -372,7 +373,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { // Here, all our tablets are tied, so we're going to explicitly pick // zone1-101. name: "success with requested primary-elect", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 101, @@ -483,7 +484,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "success with existing primary", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { @@ -594,7 +595,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard not found", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{}, unlockTopo: true, // we shouldn't try to lock the nonexistent shard @@ -607,7 +608,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot stop replication", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -666,7 +667,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "lost topo lock", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -725,7 +726,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot get reparent candidates", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ StopReplicationAndGetStatusResults: map[string]struct { @@ -799,7 +800,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "zero valid reparent candidates", - 
durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{}, shards: []*vtctldatapb.Shard{ @@ -816,7 +817,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "error waiting for relay logs to apply", - durability: "none", + durability: policy.DurabilityNone, // one replica is going to take a minute to apply relay logs emergencyReparentOps: EmergencyReparentOptions{ WaitReplicasTimeout: time.Millisecond * 50, @@ -911,7 +912,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "requested primary-elect is not in tablet map", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 200, @@ -1001,7 +1002,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "requested primary-elect is not winning primary-elect", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ // we're requesting a tablet that's behind in replication Cell: "zone1", Uid: 102, @@ -1124,7 +1125,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cannot promote new primary", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 102, @@ -1237,7 +1238,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "promotion-rule - no valid candidates for emergency reparent", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -1344,7 +1345,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary - must not promotion rule", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -1456,7 +1457,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "cross cell - no valid candidates", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{PreventCrossCellPromotion: true}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ @@ -1575,7 +1576,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary in a different cell", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ PreventCrossCellPromotion: true, NewPrimaryAlias: &topodatapb.TabletAlias{ @@ -1700,7 +1701,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "proposed primary cannot make progress", - durability: "cross_cell", + durability: policy.DurabilityCrossCell, emergencyReparentOps: EmergencyReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -1815,7 +1816,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, { name: "expected primary mismatch", - durability: "none", + durability: policy.DurabilityNone, emergencyReparentOps: EmergencyReparentOptions{ ExpectedPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -2333,7 +2334,7 @@ func 
TestEmergencyReparenter_promotionOfNewPrimary(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3021,7 +3022,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -3502,7 +3503,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if tt.remoteOpTimeout != 0 { @@ -4092,7 +4093,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4336,7 +4337,7 @@ func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - durability, _ := GetDurabilityPolicy("none") + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) test.emergencyReparentOps.durability = durability logger := logutil.NewMemoryLogger() @@ -4355,7 +4356,7 @@ func TestEmergencyReparenter_identifyPrimaryCandidate(t *testing.T) { // TestParentContextCancelled tests that even if the parent context of reparentReplicas cancels, we should not cancel the context of // SetReplicationSource since there could be tablets that are running it even after ERS completes. 
func TestParentContextCancelled(t *testing.T) { - durability, err := GetDurabilityPolicy("none") + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) // Setup ERS options with a very high wait replicas timeout emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: sets.New[string]("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability} @@ -4486,28 +4487,28 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { }{ { name: "filter must not", - durability: "none", + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet, replicaCrossCellTablet}, }, { name: "host taking backup must not be on the list when there are other candidates", - durability: "none", + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: []*topodatapb.Tablet{replicaTablet, replicaCrossCellTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: replicaTakingBackup, filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet}, }, { name: "host taking backup must be the only one on the list when there are no other candidates", - durability: "none", + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: []*topodatapb.Tablet{replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: replicaTakingBackup, filteredTablets: []*topodatapb.Tablet{replicaTablet}, }, { name: "filter cross cell", - durability: "none", + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4523,14 +4524,14 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { filteredTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet}, }, { name: "filter establish", - durability: "cross_cell", + durability: policy.DurabilityCrossCell, validTablets: []*topodatapb.Tablet{primaryTablet, replicaTablet}, tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, tabletsTakingBackup: noTabletsTakingBackup, filteredTablets: nil, }, { name: "filter mixed", - durability: "cross_cell", + durability: policy.DurabilityCrossCell, prevPrimary: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone-2", @@ -4545,7 +4546,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { filteredTablets: []*topodatapb.Tablet{replicaCrossCellTablet}, }, { name: "error - requested primary must not", - durability: "none", + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4555,7 +4556,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { errShouldContain: "proposed primary zone-1-0000000003 has a must not promotion rule", }, { name: "error - requested primary not in same cell", - durability: "none", + durability: policy.DurabilityNone, validTablets: allTablets, tabletsReachable: allTablets, tabletsTakingBackup: noTabletsTakingBackup, @@ -4567,7 +4568,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { errShouldContain: "proposed primary zone-2-0000000002 is is a different cell as the previous primary", }, { name: "error - requested primary cannot establish", - durability: "cross_cell", + durability: policy.DurabilityCrossCell, validTablets: allTablets, tabletsTakingBackup: 
noTabletsTakingBackup, tabletsReachable: []*topodatapb.Tablet{primaryTablet, replicaTablet, rdonlyTablet, rdonlyCrossCellTablet}, @@ -4579,7 +4580,7 @@ func TestEmergencyReparenter_filterValidCandidates(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durability) + durability, err := policy.GetDurabilityPolicy(tt.durability) require.NoError(t, err) tt.opts.durability = durability logger := logutil.NewMemoryLogger() @@ -5525,7 +5526,7 @@ func TestEmergencyReparenterFindErrantGTIDs(t *testing.T) { slices.Sort(keys) require.ElementsMatch(t, tt.wantedCandidates, keys) - dp, err := GetDurabilityPolicy("semi_sync") + dp, err := policy.GetDurabilityPolicy(policy.DurabilitySemiSync) require.NoError(t, err) ers := EmergencyReparenter{logger: logutil.NewCallbackLogger(func(*logutilpb.Event) {})} winningPrimary, _, err := ers.findMostAdvanced(candidates, tt.tabletMap, EmergencyReparentOptions{durability: dp}) diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index 91669e33b5f..dcd6dc7c590 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -36,6 +36,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" ) @@ -71,7 +72,7 @@ type PlannedReparentOptions struct { // back out to the caller. lockAction string - durability Durabler + durability policy.Durabler } // NewPlannedReparenter returns a new PlannedReparenter object, ready to perform @@ -256,7 +257,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( setSourceCtx, setSourceCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) defer setSourceCancel() - if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect), 0); err != nil { + if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, policy.IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect), 0); err != nil { return vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr) } @@ -304,7 +305,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( undoCtx, undoCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer undoCancel() - if undoErr := pr.tmc.UndoDemotePrimary(undoCtx, currentPrimary.Tablet, SemiSyncAckers(opts.durability, currentPrimary.Tablet) > 0); undoErr != nil { + if undoErr := pr.tmc.UndoDemotePrimary(undoCtx, currentPrimary.Tablet, policy.SemiSyncAckers(opts.durability, currentPrimary.Tablet) > 0); undoErr != nil { pr.logger.Warningf("encountered error while performing UndoDemotePrimary(%v): %v", currentPrimary.AliasString(), undoErr) finalWaitErr = vterrors.Wrapf(finalWaitErr, "encountered error while performing UndoDemotePrimary(%v): %v", currentPrimary.AliasString(), undoErr) } @@ -332,7 +333,7 @@ func (pr *PlannedReparenter) performInitialPromotion( // This is done to guarantee safety, in the sense that the semi-sync is on before we start accepting writes. 
// However, during initialization, it is likely that the database would not be created in the MySQL instance. // Therefore, we have to first set read-write mode, create the database and then fix semi-sync, otherwise we get blocked. - rp, err := pr.tmc.InitPrimary(promoteCtx, primaryElect, SemiSyncAckers(opts.durability, primaryElect) > 0) + rp, err := pr.tmc.InitPrimary(promoteCtx, primaryElect, policy.SemiSyncAckers(opts.durability, primaryElect) > 0) if err != nil { return "", vterrors.Wrapf(err, "primary-elect tablet %v failed to be promoted to primary; please try again", primaryElectAliasStr) } @@ -521,7 +522,7 @@ func (pr *PlannedReparenter) reparentShardLocked( } pr.logger.Infof("Getting a new durability policy for %v", keyspaceDurability) - opts.durability, err = GetDurabilityPolicy(keyspaceDurability) + opts.durability, err = policy.GetDurabilityPolicy(keyspaceDurability) if err != nil { return err } @@ -693,7 +694,7 @@ func (pr *PlannedReparenter) reparentTablets( // that it needs to start replication after transitioning from // PRIMARY => REPLICA. forceStartReplication := false - if err := pr.tmc.SetReplicationSource(replCtx, tablet, ev.NewPrimary.Alias, reparentJournalTimestamp, "", forceStartReplication, IsReplicaSemiSync(opts.durability, ev.NewPrimary, tablet), 0); err != nil { + if err := pr.tmc.SetReplicationSource(replCtx, tablet, ev.NewPrimary.Alias, reparentJournalTimestamp, "", forceStartReplication, policy.IsReplicaSemiSync(opts.durability, ev.NewPrimary, tablet), 0); err != nil { rec.RecordError(vterrors.Wrapf(err, "tablet %v failed to SetReplicationSource(%v): %v", alias, primaryElectAliasStr, err)) } }(alias, tabletInfo.Tablet) @@ -702,7 +703,7 @@ func (pr *PlannedReparenter) reparentTablets( // If `PromoteReplica` call is required, we should call it and use the position that it returns. if promoteReplicaRequired { // Promote the candidate primary to type:PRIMARY. 
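
For context on where the policy name comes from: it is stored on the keyspace record and resolved only when the reparent takes the shard lock, as reparentShardLocked does above. A sketch under that assumption (ts.GetKeyspace and the DurabilityPolicy field are the standard topo APIs; the helper itself is illustrative):

package example

import (
	"context"

	"vitess.io/vitess/go/vt/topo"
	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
)

// keyspaceDurability loads a keyspace record and resolves its configured
// durability policy into a policy.Durabler.
func keyspaceDurability(ctx context.Context, ts *topo.Server, keyspace string) (policy.Durabler, error) {
	ki, err := ts.GetKeyspace(ctx, keyspace)
	if err != nil {
		return nil, err
	}
	return policy.GetDurabilityPolicy(ki.DurabilityPolicy)
}
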
- primaryPosition, err := pr.tmc.PromoteReplica(replCtx, ev.NewPrimary, SemiSyncAckers(opts.durability, ev.NewPrimary) > 0) + primaryPosition, err := pr.tmc.PromoteReplica(replCtx, ev.NewPrimary, policy.SemiSyncAckers(opts.durability, ev.NewPrimary) > 0) if err != nil { pr.logger.Warningf("primary %v failed to PromoteReplica; cancelling replica reparent attempts", primaryElectAliasStr) replCancel() diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go index 8e2ee8f9df7..148d9fd812c 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go @@ -25,6 +25,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/test/utils" @@ -557,6 +558,11 @@ func TestPlannedReparenter_getLockAction(t *testing.T) { } } +func getDurabilityPolicy(policyName string) policy.Durabler { + p, _ := policy.GetDurabilityPolicy(policyName) + return p +} + func TestPlannedReparenter_preflightChecks(t *testing.T) { t.Parallel() @@ -791,7 +797,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 500, }, - durability: &durabilityNone{}, }, expectedIsNoop: false, expectedEvent: &events.Reparent{ @@ -819,7 +824,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 100, }, - durability: &durabilityNone{}, + durability: getDurabilityPolicy(policy.DurabilityNone), }, shouldErr: false, }, @@ -889,7 +894,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 500, }, - durability: &durabilityNone{}, }, expectedIsNoop: false, expectedEvent: &events.Reparent{ @@ -917,7 +921,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 101, }, - durability: &durabilityNone{}, + durability: getDurabilityPolicy(policy.DurabilityNone), }, shouldErr: false, }, @@ -1148,7 +1152,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { Cell: "zone1", Uid: 500, }, - durability: &durabilityCrossCell{}, + durability: getDurabilityPolicy(policy.DurabilityCrossCell), }, expectedIsNoop: true, expectedEvent: &events.Reparent{ @@ -1186,7 +1190,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.opts.durability == nil { - durability, err := GetDurabilityPolicy("none") + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) tt.opts.durability = durability } @@ -1799,7 +1803,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { ctx = _ctx } - durability, err := GetDurabilityPolicy("none") + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) tt.opts.durability = durability @@ -1946,7 +1950,7 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { ctx = _ctx } - durability, err := GetDurabilityPolicy("none") + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) pos, err := pr.performInitialPromotion( ctx, @@ -3423,7 +3427,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }{ { name: "success - durability = none", - durability: "none", + durability: policy.DurabilityNone, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3490,7 +3494,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }, { name: "success - durability = 
semi_sync", - durability: "semi_sync", + durability: policy.DurabilitySemiSync, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, @@ -3556,7 +3560,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { shouldErr: false, }, { name: "success - promote replica required", - durability: "semi_sync", + durability: policy.DurabilitySemiSync, promoteReplicaRequired: true, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { @@ -3632,7 +3636,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { shouldErr: false, }, { name: "Promote replica failed", - durability: "semi_sync", + durability: policy.DurabilitySemiSync, promoteReplicaRequired: true, tmc: &testutil.TabletManagerClient{ PromoteReplicaResults: map[string]struct { @@ -3977,11 +3981,11 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { t.Parallel() pr := NewPlannedReparenter(nil, tt.tmc, logger) - durabilityPolicy := "none" + durabilityPolicy := policy.DurabilityNone if tt.durability != "" { durabilityPolicy = tt.durability } - durability, err := GetDurabilityPolicy(durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(durabilityPolicy) require.NoError(t, err) tt.opts.durability = durability err = pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.promoteReplicaRequired, tt.tabletMap, tt.opts) diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/policy/durability.go similarity index 84% rename from go/vt/vtctl/reparentutil/durability.go rename to go/vt/vtctl/reparentutil/policy/durability.go index 29a5b2e712a..bad6846ef29 100644 --- a/go/vt/vtctl/reparentutil/durability.go +++ b/go/vt/vtctl/reparentutil/policy/durability.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reparentutil +package policy import ( "fmt" @@ -37,32 +37,47 @@ var ( durabilityPolicies = make(map[string]NewDurabler) ) +const ( + // DurabilityNone is the name of the durability policy that has no semi-sync setup. + DurabilityNone = "none" + // DurabilitySemiSync is the name of the durability policy that has 1 semi-sync setup. + DurabilitySemiSync = "semi_sync" + // DurabilityCrossCell is the name of the durability policy that has 1 semi-sync setup but only allows Primary and Replica type servers from a different cell to acknowledge semi sync. + DurabilityCrossCell = "cross_cell" + // DurabilitySemiSyncWithRdonlyAck is the name of the durability policy that has 1 semi-sync setup and allows the "rdonly" to send semi-sync acks as well. + DurabilitySemiSyncWithRdonlyAck = "semi_sync_with_rdonly_ack" + // DurabilityCrossCellWithRdonlyAck is the name of the durability policy that has 1 semi-sync setup but only allows Primary and Replica type servers from a different cell to acknowledge semi sync. It also allows the "rdonly" to send semi-sync acks. + DurabilityCrossCellWithRdonlyAck = "cross_cell_with_rdonly_ack" + // DurabilityTest is the name of the durability policy that has no semi-sync setup but overrides the type for a specific tablet to prefer. It is only meant to be used for testing purposes! 
+ DurabilityTest = "test" +) + func init() { // register all the durability rules with their functions to create them - RegisterDurability("none", func() Durabler { + RegisterDurability(DurabilityNone, func() Durabler { return &durabilityNone{} }) - RegisterDurability("semi_sync", func() Durabler { + RegisterDurability(DurabilitySemiSync, func() Durabler { return &durabilitySemiSync{ rdonlySemiSync: false, } }) - RegisterDurability("cross_cell", func() Durabler { + RegisterDurability(DurabilityCrossCell, func() Durabler { return &durabilityCrossCell{ rdonlySemiSync: false, } }) - RegisterDurability("semi_sync_with_rdonly_ack", func() Durabler { + RegisterDurability(DurabilitySemiSyncWithRdonlyAck, func() Durabler { return &durabilitySemiSync{ rdonlySemiSync: true, } }) - RegisterDurability("cross_cell_with_rdonly_ack", func() Durabler { + RegisterDurability(DurabilityCrossCellWithRdonlyAck, func() Durabler { return &durabilityCrossCell{ rdonlySemiSync: true, } }) - RegisterDurability("test", func() Durabler { + RegisterDurability(DurabilityTest, func() Durabler { return &durabilityTest{} }) } diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/policy/durability_test.go similarity index 97% rename from go/vt/vtctl/reparentutil/durability_test.go rename to go/vt/vtctl/reparentutil/policy/durability_test.go index 5745da64f7e..441275f29bf 100644 --- a/go/vt/vtctl/reparentutil/durability_test.go +++ b/go/vt/vtctl/reparentutil/policy/durability_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reparentutil +package policy import ( "testing" @@ -29,7 +29,7 @@ import ( ) func TestDurabilityNone(t *testing.T) { - durability, err := GetDurabilityPolicy("none") + durability, err := GetDurabilityPolicy(DurabilityNone) require.NoError(t, err) promoteRule := PromotionRule(durability, &topodatapb.Tablet{ @@ -78,10 +78,10 @@ func TestDurabilitySemiSync(t *testing.T) { rdonlySemiSync bool }{ { - durabilityPolicy: "semi_sync", + durabilityPolicy: DurabilitySemiSync, rdonlySemiSync: false, }, { - durabilityPolicy: "semi_sync_with_rdonly_ack", + durabilityPolicy: DurabilitySemiSyncWithRdonlyAck, rdonlySemiSync: true, }, } @@ -176,10 +176,10 @@ func TestDurabilityCrossCell(t *testing.T) { rdonlySemiSync bool }{ { - durabilityPolicy: "cross_cell", + durabilityPolicy: DurabilityCrossCell, rdonlySemiSync: false, }, { - durabilityPolicy: "cross_cell_with_rdonly_ack", + durabilityPolicy: DurabilityCrossCellWithRdonlyAck, rdonlySemiSync: true, }, } diff --git a/go/vt/vtctl/reparentutil/reparent_sorter.go b/go/vt/vtctl/reparentutil/reparent_sorter.go index ea7367bd36b..2f9c3c9ea8d 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter.go @@ -20,6 +20,7 @@ import ( "sort" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -32,11 +33,11 @@ type reparentSorter struct { tablets []*topodatapb.Tablet positions []replication.Position innodbBufferPool []int - durability Durabler + durability policy.Durabler } // newReparentSorter creates a new reparentSorter -func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability Durabler) *reparentSorter { +func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability 
policy.Durabler) *reparentSorter { return &reparentSorter{ tablets: tablets, positions: positions, @@ -82,8 +83,8 @@ func (rs *reparentSorter) Less(i, j int) bool { // at this point, both have the same GTIDs // so we check their promotion rules - jPromotionRule := PromotionRule(rs.durability, rs.tablets[j]) - iPromotionRule := PromotionRule(rs.durability, rs.tablets[i]) + jPromotionRule := policy.PromotionRule(rs.durability, rs.tablets[j]) + iPromotionRule := policy.PromotionRule(rs.durability, rs.tablets[i]) // If the promotion rules are different then we want to sort by the promotion rules. if len(rs.innodbBufferPool) != 0 && jPromotionRule == iPromotionRule { @@ -100,7 +101,7 @@ func (rs *reparentSorter) Less(i, j int) bool { // sortTabletsForReparent sorts the tablets, given their positions for emergency reparent shard and planned reparent shard. // Tablets are sorted first by their replication positions, with ties broken by the promotion rules. -func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability Durabler) error { +func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []replication.Position, innodbBufferPool []int, durability policy.Durabler) error { // throw an error internal error in case of unequal number of tablets and positions // fail-safe code prevents panic in sorting in case the lengths are unequal if len(tablets) != len(positions) { diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go index 87e7b253d54..86aa129f1a4 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/mysql/replication" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" ) // TestReparentSorter tests that the sorting for tablets works correctly @@ -135,7 +136,7 @@ func TestReparentSorter(t *testing.T) { }, } - durability, err := GetDurabilityPolicy("none") + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index 17dbaeae015..1e1c2b98369 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/topotools/events" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" ) @@ -153,12 +154,12 @@ func SetReplicationSource(ctx context.Context, ts *topo.Server, tmc tmclient.Tab return err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err } - isSemiSync := IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet) + isSemiSync := policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet) return tmc.SetReplicationSource(ctx, tablet, shardPrimary.Alias, 0, "", false, isSemiSync, 0) } @@ -183,7 +184,7 @@ func stopReplicationAndBuildStatusMaps( stopReplicationTimeout time.Duration, ignoredTablets sets.Set[string], tabletToWaitFor *topodatapb.TabletAlias, - durability Durabler, + durability 
policy.Durabler, waitForAllTablets bool, logger logutil.Logger, ) (*replicationSnapshot, error) { diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 1b36186efb8..1f8e5d097b7 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" @@ -289,7 +290,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ { name: "success", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -358,7 +359,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "success with wait for all tablets", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -428,7 +429,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "timing check with wait for all tablets", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -514,7 +515,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "success - 2 rdonly failures", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -608,7 +609,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "success - 1 rdonly and 1 replica failures", - durability: "semi_sync", + durability: policy.DurabilitySemiSync, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -702,7 +703,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "ignore tablets", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -762,7 +763,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "have PRIMARY tablet and can demote", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -841,7 +842,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "one tablet is PRIMARY and cannot demote", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -906,7 +907,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "multiple tablets are PRIMARY and cannot demote", - durability: "none", + durability: policy.DurabilityNone, tmc: 
&stopReplicationAndBuildStatusMapsTestTMClient{ demotePrimaryResults: map[string]*struct { PrimaryStatus *replicationdatapb.PrimaryStatus @@ -959,7 +960,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "stopReplicasTimeout exceeded", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusDelays: map[string]time.Duration{ "zone1-0000000100": time.Minute, // zone1-0000000100 will timeout and not be included @@ -1023,7 +1024,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "one tablet fails to StopReplication", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1080,7 +1081,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "multiple tablets fail StopReplication", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1121,7 +1122,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: true, }, { name: "1 tablets fail StopReplication and 1 has replication stopped", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1166,7 +1167,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, { name: "slow tablet is the new primary requested", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusDelays: map[string]time.Duration{ "zone1-0000000102": 1 * time.Second, // zone1-0000000102 is slow to respond but has to be included since it is the requested primary @@ -1268,7 +1269,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { shouldErr: false, }, { name: "Handle nil replication status After. 
No segfaulting when determining backup status, and fall back to Before status", - durability: "none", + durability: policy.DurabilityNone, tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ stopReplicationAndGetStatusResults: map[string]*struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1340,7 +1341,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - durability, err := GetDurabilityPolicy(tt.durability) + durability, err := policy.GetDurabilityPolicy(tt.durability) require.NoError(t, err) startTime := time.Now() res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, tt.waitForAllTablets, logger) diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index c4c23e65c7e..4b8a4cbc431 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -345,9 +346,9 @@ func findCandidate( } // getTabletsWithPromotionRules gets the tablets with the given promotion rule from the list of tablets -func getTabletsWithPromotionRules(durability Durabler, tablets []*topodatapb.Tablet, rule promotionrule.CandidatePromotionRule) (res []*topodatapb.Tablet) { +func getTabletsWithPromotionRules(durability policy.Durabler, tablets []*topodatapb.Tablet, rule promotionrule.CandidatePromotionRule) (res []*topodatapb.Tablet) { for _, candidate := range tablets { - promotionRule := PromotionRule(durability, candidate) + promotionRule := policy.PromotionRule(durability, candidate) if promotionRule == rule { res = append(res, candidate) } diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index ac44da8175a..276bab2e443 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/utils" @@ -1014,7 +1015,7 @@ zone1-0000000100 is not a replica`, }, } - durability, err := GetDurabilityPolicy("none") + durability, err := policy.GetDurabilityPolicy(policy.DurabilityNone) require.NoError(t, err) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -1829,7 +1830,7 @@ func Test_getTabletsWithPromotionRules(t *testing.T) { filteredTablets: nil, }, } - durability, _ := GetDurabilityPolicy("none") + durability, _ := policy.GetDurabilityPolicy(policy.DurabilityNone) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { res := getTabletsWithPromotionRules(durability, tt.tablets, tt.rule) diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 1a4735b1c82..9ad64c3d4fd 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -100,6 +100,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/ptr" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/flagutil" @@ -1818,7 +1819,7 @@ func commandCreateKeyspace(ctx context.Context, wr 
*wrangler.Wrangler, subFlags keyspaceType := subFlags.String("keyspace_type", "", "Specifies the type of the keyspace") baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") - durabilityPolicy := subFlags.String("durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + durabilityPolicy := subFlags.String("durability-policy", policy.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") sidecarDBName := subFlags.String("sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") if err := subFlags.Parse(args); err != nil { return err @@ -1840,7 +1841,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags var snapshotTime *vttime.Time if ktype == topodatapb.KeyspaceType_SNAPSHOT { - if *durabilityPolicy != "none" { + if *durabilityPolicy != policy.DurabilityNone { return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "durability-policy cannot be specified while creating a snapshot keyspace") } if *baseKeyspace == "" { diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index d8ac8beccc1..4166ca3293b 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/servenv/testutils" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/wrangler" @@ -53,7 +54,7 @@ func TestAPI(t *testing.T) { defer server.Close() ks1 := &topodatapb.Keyspace{ - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, SidecarDbName: "_vt_sidecar_ks1", } diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index f9ae8be3820..d45073cd006 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -74,7 +74,7 @@ func (vte *VTExplain) initVtgateExecutor(ctx context.Context, ts *topo.Server, v var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests queryLogBufferSize := 10 plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false) - vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.env, vte.explainTopo, Cell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0) + vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.env, vte.explainTopo, Cell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0, vtgate.NewDynamicViperConfig()) vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) return nil @@ -88,7 +88,9 @@ func (vte *VTExplain) newFakeResolver(ctx context.Context, opts *Options, serv s if opts.ExecutionMode == ModeTwoPC { txMode = vtgatepb.TransactionMode_TWOPC } - tc := vtgate.NewTxConn(gw, txMode) + tc := vtgate.NewTxConn(gw, &vtgate.StaticConfig{ + TxMode: txMode, + }) sc := vtgate.NewScatterConn("", tc, gw) srvResolver := srvtopo.NewResolver(serv, gw, cell) return vtgate.NewResolver(srvResolver, serv, cell, sc) diff --git a/go/vt/vtgate/dynamicconfig/config.go 
b/go/vt/vtgate/dynamicconfig/config.go index 5bb1d991eae..014160029cd 100644 --- a/go/vt/vtgate/dynamicconfig/config.go +++ b/go/vt/vtgate/dynamicconfig/config.go @@ -1,6 +1,28 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package dynamicconfig +import vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + type DDL interface { OnlineEnabled() bool DirectEnabled() bool } + +type TxMode interface { + TransactionMode() vtgatepb.TransactionMode +} diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index e59832cdab5..50d3a4b6bbf 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ -465,7 +465,7 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(240) + size += int64(224) } // field InsertCommon vitess.io/vitess/go/vt/vtgate/engine.InsertCommon size += cached.InsertCommon.CachedSize(false) diff --git a/go/vt/vtgate/engine/fake_primitive_test.go b/go/vt/vtgate/engine/fake_primitive_test.go index b878c1931c0..f3ab5ad5336 100644 --- a/go/vt/vtgate/engine/fake_primitive_test.go +++ b/go/vt/vtgate/engine/fake_primitive_test.go @@ -40,7 +40,8 @@ type fakePrimitive struct { // sendErr is sent at the end of the stream if it's set. sendErr error - log []string + noLog bool + log []string allResultsInOneCall bool @@ -85,7 +86,9 @@ func (f *fakePrimitive) TryExecute(ctx context.Context, vcursor VCursor, bindVar } func (f *fakePrimitive) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - f.log = append(f.log, fmt.Sprintf("StreamExecute %v %v", printBindVars(bindVars), wantfields)) + if !f.noLog { + f.log = append(f.log, fmt.Sprintf("StreamExecute %v %v", printBindVars(bindVars), wantfields)) + } if f.results == nil { return f.sendErr } diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index f27ca380876..aac3e9b584c 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -400,6 +400,20 @@ func (t *noopVCursor) GetDBDDLPluginName() string { panic("unimplemented") } +func (t *noopVCursor) SetLastInsertID(uint64) {} +func (t *noopVCursor) VExplainLogging() {} +func (t *noopVCursor) DisableLogging() {} +func (t *noopVCursor) GetVExplainLogs() []ExecuteEntry { + return nil +} +func (t *noopVCursor) GetLogs() ([]ExecuteEntry, error) { + return nil, nil +} + +// RecordMirrorStats implements VCursor. 
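
The dynamicconfig package introduced above now carries two small read-only interfaces, DDL and TxMode, so consumers re-read the setting at decision time instead of caching a value at construction. A hedged sketch of a fixed-value implementation satisfying both (this standalone type is illustrative; vtgate's StaticConfig used in the tests below plays the same role):

package example

import (
	vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
	"vitess.io/vitess/go/vt/vtgate/dynamicconfig"
)

// staticConfig answers the DDL and TxMode queries with fixed values. A
// production implementation would back these getters with a viper watcher
// so the answers can change without restarting vtgate.
type staticConfig struct {
	onlineDDL bool
	directDDL bool
	txMode    vtgatepb.TransactionMode
}

func (c *staticConfig) OnlineEnabled() bool { return c.onlineDDL }
func (c *staticConfig) DirectEnabled() bool { return c.directDDL }

func (c *staticConfig) TransactionMode() vtgatepb.TransactionMode { return c.txMode }

var (
	_ dynamicconfig.DDL    = (*staticConfig)(nil)
	_ dynamicconfig.TxMode = (*staticConfig)(nil)
)
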
+func (t *noopVCursor) RecordMirrorStats(sourceExecTime, targetExecTime time.Duration, targetErr error) { +} + var ( _ VCursor = (*loggingVCursor)(nil) _ SessionActions = (*loggingVCursor)(nil) @@ -893,20 +907,6 @@ func (t *loggingVCursor) RecordMirrorStats(sourceExecTime, targetExecTime time.D } } -func (t *noopVCursor) VExplainLogging() {} -func (t *noopVCursor) DisableLogging() {} -func (t *noopVCursor) GetVExplainLogs() []ExecuteEntry { - return nil -} - -func (t *noopVCursor) GetLogs() ([]ExecuteEntry, error) { - return nil, nil -} - -// RecordMirrorStats implements VCursor. -func (t *noopVCursor) RecordMirrorStats(sourceExecTime, targetExecTime time.Duration, targetErr error) { -} - func expectResult(t *testing.T, result, want *sqltypes.Result) { t.Helper() fieldsResult := fmt.Sprintf("%v", result.Fields) diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index 10a4048572f..5bc206f7465 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -58,11 +58,9 @@ type Insert struct { // Alias represents the row alias with columns if specified in the query. Alias string - - FetchLastInsertID bool } -// newQueryInsert creates an Insert with a query string. +// newQueryInsert creates an Insert with a query string. Used in testing. func newQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query string) *Insert { return &Insert{ InsertCommon: InsertCommon{ @@ -73,7 +71,7 @@ func newQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query stri } } -// newInsert creates a new Insert. +// newInsert creates a new Insert. Used in testing. func newInsert( opcode InsertOpcode, ignore bool, diff --git a/go/vt/vtgate/engine/insert_common.go b/go/vt/vtgate/engine/insert_common.go index 629d848d978..d4cae045e86 100644 --- a/go/vt/vtgate/engine/insert_common.go +++ b/go/vt/vtgate/engine/insert_common.go @@ -161,7 +161,7 @@ func (ins *InsertCommon) executeUnshardedTableQuery(ctx context.Context, vcursor if err != nil { return nil, err } - qr, err := execShard(ctx, loggingPrimitive, vcursor, query, bindVars, rss[0], true, !ins.PreventAutoCommit /* canAutocommit */, false) + qr, err := execShard(ctx, loggingPrimitive, vcursor, query, bindVars, rss[0], true, !ins.PreventAutoCommit /* canAutocommit */, ins.FetchLastInsertID) if err != nil { return nil, err } diff --git a/go/vt/vtgate/engine/insert_select.go b/go/vt/vtgate/engine/insert_select.go index bccee5f2cf9..af834858175 100644 --- a/go/vt/vtgate/engine/insert_select.go +++ b/go/vt/vtgate/engine/insert_select.go @@ -51,7 +51,7 @@ type ( } ) -// newInsertSelect creates a new InsertSelect. +// newInsertSelect creates a new InsertSelect. Used in testing. func newInsertSelect( ignore bool, keyspace *vindexes.Keyspace, diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index e6fa102581e..7734dd81a6b 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -147,6 +147,8 @@ type ( // RecordMirrorStats is used to record stats about a mirror query. 
RecordMirrorStats(time.Duration, time.Duration, error) + + SetLastInsertID(uint64) } // SessionActions gives primitives ability to interact with the session state diff --git a/go/vt/vtgate/engine/semi_join.go b/go/vt/vtgate/engine/semi_join.go index f0dd0d09033..b5bc74a5941 100644 --- a/go/vt/vtgate/engine/semi_join.go +++ b/go/vt/vtgate/engine/semi_join.go @@ -18,6 +18,7 @@ package engine import ( "context" + "sync/atomic" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -62,24 +63,26 @@ func (jn *SemiJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma // TryStreamExecute performs a streaming exec. func (jn *SemiJoin) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - joinVars := make(map[string]*querypb.BindVariable) err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, wantfields, func(lresult *sqltypes.Result) error { + joinVars := make(map[string]*querypb.BindVariable) result := &sqltypes.Result{Fields: lresult.Fields} for _, lrow := range lresult.Rows { for k, col := range jn.Vars { joinVars[k] = sqltypes.ValueBindVariable(lrow[col]) } - rowAdded := false + var rowAdded atomic.Bool err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), false, func(rresult *sqltypes.Result) error { - if len(rresult.Rows) > 0 && !rowAdded { - result.Rows = append(result.Rows, lrow) - rowAdded = true + if len(rresult.Rows) > 0 { + rowAdded.Store(true) } return nil }) if err != nil { return err } + if rowAdded.Load() { + result.Rows = append(result.Rows, lrow) + } } return callback(result) }) diff --git a/go/vt/vtgate/engine/semi_join_test.go b/go/vt/vtgate/engine/semi_join_test.go index 8fee0490415..a103b0686b2 100644 --- a/go/vt/vtgate/engine/semi_join_test.go +++ b/go/vt/vtgate/engine/semi_join_test.go @@ -18,6 +18,7 @@ package engine import ( "context" + "sync" "testing" "vitess.io/vitess/go/test/utils" @@ -159,3 +160,81 @@ func TestSemiJoinStreamExecute(t *testing.T) { "4|d|dd", )) } + +// TestSemiJoinStreamExecuteParallelExecution tests SemiJoin stream execution with parallel execution +// to ensure we have no data races. +func TestSemiJoinStreamExecuteParallelExecution(t *testing.T) { + leftPrim := &fakePrimitive{ + results: []*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col1|col2|col3", + "int64|varchar|varchar", + ), + "1|a|aa", + "2|b|bb", + ), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col1|col2|col3", + "int64|varchar|varchar", + ), + "3|c|cc", + "4|d|dd", + ), + }, + async: true, + } + rightFields := sqltypes.MakeTestFields( + "col4|col5|col6", + "int64|varchar|varchar", + ) + rightPrim := &fakePrimitive{ + // we'll return non-empty results for rows 2 and 4 + results: sqltypes.MakeTestStreamingResults(rightFields, + "4|d|dd", + "---", + "---", + "5|e|ee", + "6|f|ff", + "7|g|gg", + ), + async: true, + noLog: true, + } + + jn := &SemiJoin{ + Left: leftPrim, + Right: rightPrim, + Vars: map[string]int{ + "bv": 1, + }, + } + var res *sqltypes.Result + var mu sync.Mutex + err := jn.TryStreamExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { + mu.Lock() + defer mu.Unlock() + if res == nil { + res = result + } else { + res.Rows = append(res.Rows, result.Rows...) 
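+ // With async fake primitives, the left and right streams deliver their
+ // chunks on separate goroutines, so this callback can run concurrently;
+ // the mutex above is what keeps the shared res aggregation race-free.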
+ } + return nil + }) + require.NoError(t, err) + leftPrim.ExpectLog(t, []string{ + `StreamExecute true`, + }) + // We'll get all the rows back in left primitive, since we're returning the same set of rows + // from the right primitive that makes them all qualify. + expectResultAnyOrder(t, res, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col1|col2|col3", + "int64|varchar|varchar", + ), + "1|a|aa", + "2|b|bb", + "3|c|cc", + "4|d|dd", + )) +} diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index 95fb5c87a32..f0de330cbed 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -22,23 +22,18 @@ import ( "fmt" "strings" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/sysvars" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/srvtopo" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" ) diff --git a/go/vt/vtgate/evalengine/arena.go b/go/vt/vtgate/evalengine/arena.go index ccfe63f514f..c5457f076e8 100644 --- a/go/vt/vtgate/evalengine/arena.go +++ b/go/vt/vtgate/evalengine/arena.go @@ -71,7 +71,7 @@ func (a *Arena) newEvalEnum(raw []byte, values *EnumSetValues) *evalEnum { } else { a.aEnum = append(a.aEnum, evalEnum{}) } - val := &a.aEnum[len(a.aInt64)-1] + val := &a.aEnum[len(a.aEnum)-1] s := string(raw) val.string = s val.value = valueIdx(values, s) @@ -84,7 +84,7 @@ func (a *Arena) newEvalSet(raw []byte, values *EnumSetValues) *evalSet { } else { a.aSet = append(a.aSet, evalSet{}) } - val := &a.aSet[len(a.aInt64)-1] + val := &a.aSet[len(a.aSet)-1] s := string(raw) val.string = s val.set = evalSetBits(values, s) diff --git a/go/vt/vtgate/evalengine/cached_size.go b/go/vt/vtgate/evalengine/cached_size.go index c1ed1f9475c..d51c65c75b4 100644 --- a/go/vt/vtgate/evalengine/cached_size.go +++ b/go/vt/vtgate/evalengine/cached_size.go @@ -1181,6 +1181,18 @@ func (cached *builtinLastDay) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinLastInsertID) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinLeftRight) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) diff --git a/go/vt/vtgate/evalengine/compiler_asm.go b/go/vt/vtgate/evalengine/compiler_asm.go index dfb1a30bffc..7dda215353f 100644 --- a/go/vt/vtgate/evalengine/compiler_asm.go +++ b/go/vt/vtgate/evalengine/compiler_asm.go @@ -5138,3 +5138,18 @@ func (asm *assembler) Introduce(offset int, t sqltypes.Type, col collations.Type return 1 }, "INTRODUCE (SP-1)") } + +func (asm *assembler) Fn_LAST_INSERT_ID() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1] + if arg == nil { + env.VCursor().SetLastInsertID(0) + } else { + iarg := evalToInt64(arg) + uarg := env.vm.arena.newEvalUint64(uint64(iarg.i)) + 
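+ // Replace the argument at the top of the stack with its unsigned
+ // coercion and mirror the value into the session via SetLastInsertID,
+ // matching the interpreted path added to fn_misc.go below.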
env.vm.stack[env.vm.sp-1] = uarg + env.VCursor().SetLastInsertID(uarg.u) + } + return 1 + }, "FN LAST_INSERT_ID UINT64(SP-1)") +} diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go index 88c13a479ed..343bb0cd043 100644 --- a/go/vt/vtgate/evalengine/compiler_test.go +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -760,6 +760,20 @@ func TestCompilerSingle(t *testing.T) { expression: `WEEK(timestamp '2024-01-01 10:34:58', 1)`, result: `INT64(1)`, }, + { + expression: `column0 + 1`, + values: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Enum, []byte("foo"))}, + // Returns 0, as unknown enums evaluate here to -1. We have this test to + // exercise the path to push enums onto the stack. + result: `FLOAT64(0)`, + }, + { + expression: `column0 + 1`, + values: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Set, []byte("foo"))}, + // Returns 1, as unknown sets evaluate here to 0. We have this test to + // exercise the path to push sets onto the stack. + result: `FLOAT64(1)`, + }, } tz, _ := time.LoadLocation("Europe/Madrid") @@ -903,6 +917,99 @@ func TestBindVarLiteral(t *testing.T) { } } +type testVcursor struct { + lastInsertID *uint64 + env *vtenv.Environment +} + +func (t *testVcursor) TimeZone() *time.Location { + return time.UTC +} + +func (t *testVcursor) GetKeyspace() string { + return "apa" +} + +func (t *testVcursor) SQLMode() string { + return "oltp" +} + +func (t *testVcursor) Environment() *vtenv.Environment { + return t.env +} + +func (t *testVcursor) SetLastInsertID(id uint64) { + t.lastInsertID = &id +} + +var _ evalengine.VCursor = (*testVcursor)(nil) + +func TestLastInsertID(t *testing.T) { + var testCases = []struct { + expression string + result uint64 + missing bool + }{ + { + expression: `last_insert_id(1)`, + result: 1, + }, { + expression: `12`, + missing: true, + }, { + expression: `last_insert_id(666)`, + result: 666, + }, { + expression: `last_insert_id(null)`, + result: 0, + }, + } + + venv := vtenv.NewTestEnv() + for _, tc := range testCases { + t.Run(tc.expression, func(t *testing.T) { + expr, err := venv.Parser().ParseExpr(tc.expression) + require.NoError(t, err) + + cfg := &evalengine.Config{ + Collation: collations.CollationUtf8mb4ID, + NoConstantFolding: true, + NoCompilation: false, + Environment: venv, + } + t.Run("eval", func(t *testing.T) { + cfg.NoCompilation = true + runTest(t, expr, cfg, tc) + }) + t.Run("compiled", func(t *testing.T) { + cfg.NoCompilation = false + runTest(t, expr, cfg, tc) + }) + }) + } +} + +func runTest(t *testing.T, expr sqlparser.Expr, cfg *evalengine.Config, tc struct { + expression string + result uint64 + missing bool +}) { + converted, err := evalengine.Translate(expr, cfg) + require.NoError(t, err) + + vc := &testVcursor{env: vtenv.NewTestEnv()} + env := evalengine.NewExpressionEnv(context.Background(), nil, vc) + + _, err = env.Evaluate(converted) + require.NoError(t, err) + if tc.missing { + require.Nil(t, vc.lastInsertID) + } else { + require.NotNil(t, vc.lastInsertID) + require.Equal(t, tc.result, *vc.lastInsertID) + } +} + func TestCompilerNonConstant(t *testing.T) { var testCases = []struct { expression string diff --git a/go/vt/vtgate/evalengine/expr_env.go b/go/vt/vtgate/evalengine/expr_env.go index 38a65f9b4e0..4a7f9849ab0 100644 --- a/go/vt/vtgate/evalengine/expr_env.go +++ b/go/vt/vtgate/evalengine/expr_env.go @@ -35,6 +35,7 @@ type VCursor interface { GetKeyspace() string SQLMode() string Environment() *vtenv.Environment + SetLastInsertID(id uint64) } type ( @@ -140,6 
+141,7 @@ func (e *emptyVCursor) GetKeyspace() string { func (e *emptyVCursor) SQLMode() string { return config.DefaultSQLMode } +func (e *emptyVCursor) SetLastInsertID(_ uint64) {} func NewEmptyVCursor(env *vtenv.Environment, tz *time.Location) VCursor { return &emptyVCursor{env: env, tz: tz} diff --git a/go/vt/vtgate/evalengine/fn_misc.go b/go/vt/vtgate/evalengine/fn_misc.go index 8813b62f823..2a2119ee6f4 100644 --- a/go/vt/vtgate/evalengine/fn_misc.go +++ b/go/vt/vtgate/evalengine/fn_misc.go @@ -81,6 +81,10 @@ type ( builtinUUIDToBin struct { CallExpr } + + builtinLastInsertID struct { + CallExpr + } ) var _ IR = (*builtinInetAton)(nil) @@ -95,6 +99,7 @@ var _ IR = (*builtinBinToUUID)(nil) var _ IR = (*builtinIsUUID)(nil) var _ IR = (*builtinUUID)(nil) var _ IR = (*builtinUUIDToBin)(nil) +var _ IR = (*builtinLastInsertID)(nil) func (call *builtinInetAton) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) @@ -194,6 +199,33 @@ func (call *builtinInet6Aton) compile(c *compiler) (ctype, error) { return ctype{Type: sqltypes.VarBinary, Flag: flagNullable, Col: collationBinary}, nil } +func (call *builtinLastInsertID) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + env.VCursor().SetLastInsertID(0) + return nil, err + } + insertID := uint64(evalToInt64(arg).i) + env.VCursor().SetLastInsertID(insertID) + return newEvalUint64(insertID), nil +} + +func (call *builtinLastInsertID) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + c.asm.Fn_LAST_INSERT_ID() + return ctype{Type: sqltypes.Uint64, Flag: arg.Flag & flagNullable, Col: collationNumeric}, nil +} + +func (call *builtinLastInsertID) constant() bool { + return false // we don't want this function to be simplified away +} + func printIPv6AsIPv4(addr netip.Addr) (netip.Addr, bool) { b := addr.AsSlice() if len(b) != 16 { diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go index ea327601975..d559cb8ab1d 100644 --- a/go/vt/vtgate/evalengine/integration/comparison_test.go +++ b/go/vt/vtgate/evalengine/integration/comparison_test.go @@ -209,6 +209,10 @@ type vcursor struct { env *vtenv.Environment } +func (vc *vcursor) SetLastInsertID(id uint64) {} + +var _ evalengine.VCursor = (*vcursor)(nil) + func (vc *vcursor) GetKeyspace() string { return "vttest" } diff --git a/go/vt/vtgate/evalengine/translate_builtin.go b/go/vt/vtgate/evalengine/translate_builtin.go index 476ee32483b..1f8bd7798aa 100644 --- a/go/vt/vtgate/evalengine/translate_builtin.go +++ b/go/vt/vtgate/evalengine/translate_builtin.go @@ -662,6 +662,11 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { return nil, argError(method) } return &builtinReplace{CallExpr: call, collate: ast.cfg.Collation}, nil + case "last_insert_id": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinLastInsertID{CallExpr: call}, nil default: return nil, translateExprNotSupported(fn) } diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index d3d2ba8e8fd..0281e28700f 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -31,6 +31,7 @@ import ( "github.com/spf13/pflag" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/vtgate/dynamicconfig" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/cache/theine" @@ -136,7 +137,8 @@ type Executor struct { 
warmingReadsPercent int warmingReadsChannel chan bool - vConfig econtext.VCursorConfig + vConfig econtext.VCursorConfig + ddlConfig dynamicconfig.DDL } var executorOnce sync.Once @@ -168,6 +170,7 @@ func NewExecutor( noScatter bool, pv plancontext.PlannerVersion, warmingReadsPercent int, + ddlConfig dynamicconfig.DDL, ) *Executor { e := &Executor{ env: env, @@ -183,6 +186,7 @@ func NewExecutor( plans: plans, warmingReadsPercent: warmingReadsPercent, warmingReadsChannel: make(chan bool, warmingReadsConcurrency), + ddlConfig: ddlConfig, } // setting the vcursor config. e.initVConfig(warnOnShardedOnly, pv) @@ -484,7 +488,7 @@ func (e *Executor) addNeededBindVars(vcursor *econtext.VCursorImpl, bindVarNeeds case sysvars.TransactionMode.Name: txMode := session.TransactionMode if txMode == vtgatepb.TransactionMode_UNSPECIFIED { - txMode = getTxMode() + txMode = transactionMode.Get() } bindVars[key] = sqltypes.StringBindVariable(txMode.String()) case sysvars.Workload.Name: @@ -1156,11 +1160,7 @@ func (e *Executor) buildStatement( reservedVars *sqlparser.ReservedVars, bindVarNeeds *sqlparser.BindVarNeeds, ) (*engine.Plan, error) { - cfg := &dynamicViperConfig{ - onlineDDL: enableOnlineDDL, - directDDL: enableDirectDDL, - } - plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, cfg) + plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, e.ddlConfig) if err != nil { return nil, err } diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 2ee3425209f..43987217039 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -183,7 +183,7 @@ func createExecutorEnvCallback(t testing.TB, eachShard func(shard, ks string, ta // one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness. 
plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false) - executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} @@ -232,7 +232,7 @@ func createCustomExecutor(t testing.TB, vschema string, mysqlVersion string) (ex plans := DefaultPlanCache() env, err := vtenv.New(vtenv.Options{MySQLServerVersion: mysqlVersion}) require.NoError(t, err) - executor = NewExecutor(ctx, env, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, env, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -269,7 +269,7 @@ func createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqlty sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -294,7 +294,7 @@ func createExecutorEnvWithPrimaryReplicaConn(t testing.TB, ctx context.Context, replica = hc.AddTestTablet(cell, "0-replica", 1, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) - executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent) + executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 411f19bb30d..16628729ac6 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -1644,7 +1644,7 @@ func TestSelectListArg(t *testing.T) { func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - ex := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + ex := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) ex.SetQueryLogger(queryLogger) return ex } @@ -3326,7 +3326,7 @@ func TestStreamOrderByWithMultipleResults(t *testing.T) { } queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) 
plans := DefaultPlanCache() - executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) defer executor.Close() // some sleep for all goroutines to start @@ -3369,7 +3369,7 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { } queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) defer executor.Close() // some sleep for all goroutines to start diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index a8500dd59c4..8bb10aae8fb 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -68,7 +68,7 @@ func TestStreamSQLSharded(t *testing.T) { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig()) executor.SetQueryLogger(queryLogger) defer executor.Close() diff --git a/go/vt/vtgate/executorcontext/vcursor_impl.go b/go/vt/vtgate/executorcontext/vcursor_impl.go index 40317f5103a..3f8d7def797 100644 --- a/go/vt/vtgate/executorcontext/vcursor_impl.go +++ b/go/vt/vtgate/executorcontext/vcursor_impl.go @@ -22,6 +22,7 @@ import ( "io" "sort" "strings" + "sync" "sync/atomic" "time" @@ -154,6 +155,8 @@ type ( observer ResultsObserver + // this protects the interOpStats and shardsStats fields from concurrent writes + mu sync.Mutex // this is a map of the number of rows that every primitive has returned // if this field is nil, it means that we are not logging operator traffic interOpStats map[engine.Primitive]engine.RowsReceived @@ -642,21 +645,29 @@ func (vc *VCursorImpl) ExecutePrimitive(ctx context.Context, primitive engine.Pr } func (vc *VCursorImpl) logOpTraffic(primitive engine.Primitive, res *sqltypes.Result) { - if vc.interOpStats != nil { - rows := vc.interOpStats[primitive] - if res == nil { - rows = append(rows, 0) - } else { - rows = append(rows, len(res.Rows)) - } - vc.interOpStats[primitive] = rows + if vc.interOpStats == nil { + return + } + + vc.mu.Lock() + defer vc.mu.Unlock() + + rows := vc.interOpStats[primitive] + if res == nil { + rows = append(rows, 0) + } else { + rows = append(rows, len(res.Rows)) } + vc.interOpStats[primitive] = rows } func (vc *VCursorImpl) logShardsQueried(primitive engine.Primitive, shardsNb int) { - if vc.shardsStats != nil { - vc.shardsStats[primitive] += engine.ShardsQueried(shardsNb) + if vc.shardsStats == nil { + return } + vc.mu.Lock() + defer vc.mu.Unlock() + vc.shardsStats[primitive] += engine.ShardsQueried(shardsNb) } func (vc *VCursorImpl) ExecutePrimitiveStandalone(ctx 
context.Context, primitive engine.Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { @@ -1583,3 +1594,9 @@ func (vc *VCursorImpl) GetContextWithTimeOut(ctx context.Context) (context.Conte func (vc *VCursorImpl) IgnoreMaxMemoryRows() bool { return vc.ignoreMaxMemoryRows } + +func (vc *VCursorImpl) SetLastInsertID(id uint64) { + vc.SafeSession.mu.Lock() + defer vc.SafeSession.mu.Unlock() + vc.SafeSession.LastInsertId = id +} diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go index e31c5ae8161..fecd6c2a8b1 100644 --- a/go/vt/vtgate/legacy_scatter_conn_test.go +++ b/go/vt/vtgate/legacy_scatter_conn_test.go @@ -522,7 +522,7 @@ func TestScatterConnSingleDB(t *testing.T) { assert.Contains(t, errors[0].Error(), want) // TransactionMode_SINGLE in txconn - sc.txConn.mode = vtgatepb.TransactionMode_SINGLE + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_SINGLE} session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true}) _, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{}, false) require.Empty(t, errors) @@ -531,7 +531,7 @@ func TestScatterConnSingleDB(t *testing.T) { assert.Contains(t, errors[0].Error(), want) // TransactionMode_MULTI in txconn. Should not fail. - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true}) _, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{}, false) require.Empty(t, errors) @@ -622,6 +622,8 @@ func newTestScatterConn(ctx context.Context, hc discovery.HealthCheck, serv srvt // in '-cells_to_watch' command line parameter, which is // empty by default. So it's unused in this test, set to nil. 
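The vcursor_impl.go hunks above add two pieces of synchronization: a new VCursorImpl.mu guarding the per-primitive stats maps, and a SafeSession lock around LastInsertId. The stats maps use a common Go idiom where a nil map doubles as an "off" switch that is assigned once at construction, so the nil check needs no lock while every mutation does. A minimal, self-contained sketch of that idiom (the type and names below are illustrative, not the real vtgate API):

package main

import (
	"fmt"
	"sync"
)

// statsRecorder mirrors the shape of the change: stats is either nil
// (collection disabled) or set once before any goroutine runs, so reading
// it for the nil check is safe without the lock; only writes are guarded.
type statsRecorder struct {
	mu    sync.Mutex
	stats map[string][]int
}

func (r *statsRecorder) record(op string, rows int) {
	if r.stats == nil {
		return // collection is off; nothing to protect
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	r.stats[op] = append(r.stats[op], rows)
}

func main() {
	r := &statsRecorder{stats: map[string][]int{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			r.record("route", n) // safe under concurrent execution
		}(i)
	}
	wg.Wait()
	fmt.Println(len(r.stats["route"])) // 4
}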
gw := NewTabletGateway(ctx, hc, serv, cell) - tc := NewTxConn(gw, vtgatepb.TransactionMode_MULTI) + tc := NewTxConn(gw, &StaticConfig{ + TxMode: vtgatepb.TransactionMode_MULTI, + }) return NewScatterConn("", tc, gw) } diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index 80516871623..d3ad5afac72 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -51,7 +51,7 @@ func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStm } if ks != nil { if tables[0].AutoIncrement == nil && !ctx.SemTable.ForeignKeysPresent() { - plan := insertUnshardedShortcut(insStmt, ks, tables) + plan := insertUnshardedShortcut(ctx, insStmt, ks, tables) setCommentDirectivesOnPlan(plan, insStmt) return newPlanResult(plan, operators.QualifiedTables(ks, tables)...), nil } @@ -90,12 +90,13 @@ func errOutIfPlanCannotBeConstructed(ctx *plancontext.PlanningContext, vTbl *vin return ctx.SemTable.NotUnshardedErr } -func insertUnshardedShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) engine.Primitive { +func insertUnshardedShortcut(ctx *plancontext.PlanningContext, stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) engine.Primitive { eIns := &engine.Insert{ InsertCommon: engine.InsertCommon{ - Opcode: engine.InsertUnsharded, - Keyspace: ks, - TableName: tables[0].Name.String(), + Opcode: engine.InsertUnsharded, + Keyspace: ks, + TableName: tables[0].Name.String(), + FetchLastInsertID: ctx.SemTable.ShouldFetchLastInsertID(), }, } eIns.Query = generateQuery(stmt) diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index bc71c7195b4..b51eac449fc 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -190,6 +190,7 @@ func transformInsertionSelection(ctx *plancontext.PlanningContext, op *operators ForceNonStreaming: op.ForceNonStreaming, Generate: autoIncGenerate(ins.AutoIncrement), ColVindexes: ins.ColVindexes, + FetchLastInsertID: ctx.SemTable.ShouldFetchLastInsertID(), }, VindexValueOffset: ins.VindexValueOffset, } @@ -659,9 +660,8 @@ func buildInsertPrimitive( } eins := &engine.Insert{ - InsertCommon: ic, - VindexValues: ins.VindexValues, - FetchLastInsertID: ctx.SemTable.ShouldFetchLastInsertID(), + InsertCommon: ic, + VindexValues: ins.VindexValues, } // we would need to generate the query on the fly. 
The only exception here is diff --git a/go/vt/vtgate/planbuilder/operators/cte_merging.go b/go/vt/vtgate/planbuilder/operators/cte_merging.go index cb19e06b2a7..0c1556c81e4 100644 --- a/go/vt/vtgate/planbuilder/operators/cte_merging.go +++ b/go/vt/vtgate/planbuilder/operators/cte_merging.go @@ -31,7 +31,7 @@ func tryMergeRecurse(ctx *plancontext.PlanningContext, in *RecurseCTE) (Operator } func tryMergeCTE(ctx *plancontext.PlanningContext, seed, term Operator, in *RecurseCTE) *Route { - seedRoute, termRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(seed, term) + seedRoute, termRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(ctx, seed, term) if seedRoute == nil { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index 81e36d54315..015220470e0 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -124,7 +124,7 @@ func createDeleteWithInputOp(ctx *plancontext.PlanningContext, del *sqlparser.De } var delOps []dmlOp - for _, target := range ctx.SemTable.Targets.Constituents() { + for _, target := range ctx.SemTable.DMLTargets.Constituents() { op := createDeleteOpWithTarget(ctx, target, del.Ignore) delOps = append(delOps, op) } @@ -322,7 +322,7 @@ func updateQueryGraphWithSource(ctx *plancontext.PlanningContext, input Operator return op, NoRewrite } if len(qg.Tables) > 1 { - panic(vterrors.VT12001("DELETE on reference table with join")) + panic(vterrors.VT12001("DML on reference table with join")) } for _, tbl := range qg.Tables { if tbl.ID != tblID { diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go index c035b7d11ed..cb3569cf79e 100644 --- a/go/vt/vtgate/planbuilder/operators/join_merging.go +++ b/go/vt/vtgate/planbuilder/operators/join_merging.go @@ -28,7 +28,7 @@ import ( // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. func (jm *joinMerger) mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr) *Route { - lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) + lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(ctx, lhs, rhs) if lhsRoute == nil { return nil } @@ -102,13 +102,13 @@ func mergeAnyShardRoutings(ctx *plancontext.PlanningContext, a, b *AnyShardRouti } } -func prepareInputRoutes(lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { +func prepareInputRoutes(ctx *plancontext.PlanningContext, lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) if lhsRoute == nil || rhsRoute == nil { return nil, nil, nil, nil, 0, 0, false } - lhsRoute, rhsRoute, routingA, routingB, sameKeyspace := getRoutesOrAlternates(lhsRoute, rhsRoute) + lhsRoute, rhsRoute, routingA, routingB, sameKeyspace := getRoutesOrAlternates(ctx, lhsRoute, rhsRoute) a, b := getRoutingType(routingA), getRoutingType(routingB) return lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace @@ -159,7 +159,7 @@ func (rt routingType) String() string { // getRoutesOrAlternates gets the Routings from each Route. 
If they are from different keyspaces, // we check if this is a table with alternates in other keyspaces that we can use -func getRoutesOrAlternates(lhsRoute, rhsRoute *Route) (*Route, *Route, Routing, Routing, bool) { +func getRoutesOrAlternates(ctx *plancontext.PlanningContext, lhsRoute, rhsRoute *Route) (*Route, *Route, Routing, Routing, bool) { routingA := lhsRoute.Routing routingB := rhsRoute.Routing sameKeyspace := routingA.Keyspace() == routingB.Keyspace() @@ -171,13 +171,17 @@ func getRoutesOrAlternates(lhsRoute, rhsRoute *Route) (*Route, *Route, Routing, return lhsRoute, rhsRoute, routingA, routingB, sameKeyspace } - if refA, ok := routingA.(*AnyShardRouting); ok { + // If we have a reference route, we try to find an alternate route in the same keyspace as the other routing's keyspace. + // If the reference route is one of the DML target tables, an alternate keyspace route cannot be considered. + if refA, ok := routingA.(*AnyShardRouting); ok && + !TableID(lhsRoute).IsOverlapping(ctx.SemTable.DMLTargets) { if altARoute := refA.AlternateInKeyspace(routingB.Keyspace()); altARoute != nil { return altARoute, rhsRoute, altARoute.Routing, routingB, true } } - if refB, ok := routingB.(*AnyShardRouting); ok { + if refB, ok := routingB.(*AnyShardRouting); ok && + !TableID(rhsRoute).IsOverlapping(ctx.SemTable.DMLTargets) { if altBRoute := refB.AlternateInKeyspace(routingA.Keyspace()); altBRoute != nil { return lhsRoute, altBRoute, routingA, altBRoute.Routing, true } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go index a2aca74fb6e..e222ae0f343 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go @@ -730,7 +730,7 @@ func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out Operator, joi return nil } - inRoute, outRoute, inRouting, outRouting, sameKeyspace := getRoutesOrAlternates(inRoute, outRoute) + inRoute, outRoute, inRouting, outRouting, sameKeyspace := getRoutesOrAlternates(ctx, inRoute, outRoute) inner, outer := getRoutingType(inRouting), getRoutingType(outRouting) switch { diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go index 000d176b61a..6173b59e0dc 100644 --- a/go/vt/vtgate/planbuilder/operators/union_merging.go +++ b/go/vt/vtgate/planbuilder/operators/union_merging.go @@ -108,7 +108,7 @@ func mergeUnionInputs( lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, ) (Operator, sqlparser.SelectExprs) { - lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) + lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(ctx, lhs, rhs) if lhsRoute == nil { return nil, nil } diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index dd0a86c2de2..18a81175f7b 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -164,7 +164,7 @@ func createUpdateWithInputOp(ctx *plancontext.PlanningContext, upd *sqlparser.Up ueMap := prepareUpdateExpressionList(ctx, upd) var updOps []dmlOp - for _, target := range ctx.SemTable.Targets.Constituents() { + for _, target := range ctx.SemTable.DMLTargets.Constituents() { op := createUpdateOpWithTarget(ctx, upd, target, ueMap[target]) updOps = append(updOps, op) } @@ -308,7 +308,7 @@ func errIfUpdateNotSupported(ctx *plancontext.PlanningContext, stmt *sqlparser.U } }
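The guard added to getRoutesOrAlternates above is a TableSet overlap test: a reference table may only be swapped for its alternate route in another keyspace when the table is not itself a DML target, because the DML has to run against the keyspace that owns the source table. A toy sketch of that decision, using a bitmask as a stand-in for semantics.TableSet (the real IsOverlapping lives in the semantics package; these names are illustrative):

package main

import "fmt"

// tableSet is a simplified stand-in for semantics.TableSet: one bit per table.
type tableSet uint64

func (ts tableSet) isOverlapping(other tableSet) bool { return ts&other != 0 }

func main() {
	refTable := tableSet(1 << 2)        // the reference table in the join
	dmlTargets := tableSet(1<<2 | 1<<5) // tables being updated or deleted

	// Mirrors the new condition: only merge via the alternate-keyspace
	// route when the reference table is not one of the DML targets.
	if !refTable.isOverlapping(dmlTargets) {
		fmt.Println("merge using the alternate keyspace route")
	} else {
		fmt.Println("keep the original route: the table is a DML target")
	}
}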
- // Now we check if any of the foreign key columns that are being udpated have dependencies on other updated columns. + // Now we check if any of the foreign key columns that are being updated have dependencies on other updated columns. // This is unsafe, and we currently don't support this in Vitess. if err := ctx.SemTable.ErrIfFkDependentColumnUpdated(stmt.Exprs); err != nil { panic(err) diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index df813b04dea..f3bed93e3c8 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -84,6 +84,7 @@ func (s *planTestSuite) TestPlan() { s.addPKsProvided(vschema, "user", []string{"user_extra"}, []string{"id", "user_id"}) s.addPKsProvided(vschema, "ordering", []string{"order"}, []string{"oid", "region_id"}) s.addPKsProvided(vschema, "ordering", []string{"order_event"}, []string{"oid", "ename"}) + s.addPKsProvided(vschema, "main", []string{"source_of_ref"}, []string{"id"}) // You will notice that some tests expect user.Id instead of user.id. // This is because we now pre-create vindex columns in the symbol @@ -305,6 +306,7 @@ func (s *planTestSuite) TestOne() { s.addPKsProvided(vschema, "user", []string{"user_extra"}, []string{"id", "user_id"}) s.addPKsProvided(vschema, "ordering", []string{"order"}, []string{"oid", "region_id"}) s.addPKsProvided(vschema, "ordering", []string{"order_event"}, []string{"oid", "ename"}) + s.addPKsProvided(vschema, "main", []string{"source_of_ref"}, []string{"id"}) s.testFile("onecase.json", vw, false) } @@ -666,7 +668,7 @@ func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchem current := PlanTest{ Comment: tcase.Comment, Query: tcase.Query, - SkipE2E: true, + SkipE2E: tcase.SkipE2E, } vschema.Version = Gen4 out := getPlanOutput(tcase, vschema, render) diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index 49a03a8f05a..1ecbf3d4ff9 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -6160,6 +6160,44 @@ ] } }, + { + "comment": "last_insert_id on aggregation calculated at the vtgate level", + "query": "select last_insert_id(count(*)) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select last_insert_id(count(*)) from user", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "last_insert_id(count(*)) as last_insert_id(count(*))" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FetchLastInsertID": true, + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select count(*) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, { "comment": "aggregation on top of aggregation works fine", "query": "select distinct count(*) from user, (select distinct count(*) from user) X", diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json index 8893b4df0c0..95cb14e38f5 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json @@ -2648,6 +2648,30 @@ }, "skip_e2e": true }, + { + "comment": "insert using last_insert_id with argument (already an e2e test for this plan)", + "query": "insert 
into unsharded values(last_insert_id(789), 2)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into unsharded values(last_insert_id(789), 2)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "FetchLastInsertID": true, + "Query": "insert into unsharded values (last_insert_id(789), 2)", + "TableName": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + }, + "skip_e2e": true + }, { "comment": "update vindex value to null with multiple primary keyspace id", "query": "update user set name = null where id in (1, 2, 3)", diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json index 6aa01355934..1bf893beeef 100644 --- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json @@ -2,6 +2,7 @@ { "comment": "select from unqualified ambiguous reference routes to reference source", "query": "select * from ambiguous_ref_with_source", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select * from ambiguous_ref_with_source", @@ -24,6 +25,7 @@ { "comment": "join with unqualified ambiguous reference table routes to optimal keyspace", "query": "select user.col from user join ambiguous_ref_with_source", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ambiguous_ref_with_source", @@ -47,6 +49,7 @@ { "comment": "ambiguous unqualified reference table self-join routes to reference source", "query": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", @@ -69,6 +72,7 @@ { "comment": "ambiguous unqualified reference table can merge with other opcodes left to right.", "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", @@ -92,6 +96,7 @@ { "comment": "ambiguous unqualified reference table can merge with other opcodes left to right and vindex value is in the plan", "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", @@ -119,6 +124,7 @@ { "comment": "qualified join to reference table routes to optimal keyspace", "query": "select user.col from user join main.ambiguous_ref_with_source", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select user.col from user join main.ambiguous_ref_with_source", @@ -142,6 +148,7 @@ { "comment": "insert into ambiguous unqualified reference table routes to source", "query": "insert into ambiguous_ref_with_source(col) values(1)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into ambiguous_ref_with_source(col) values(1)", @@ -164,6 +171,7 @@ { "comment": "Reference tables using left join with a derived table having a limit clause", "query": "SELECT u.id FROM ( SELECT a.id, a.u_id FROM user.ref_with_source AS a WHERE a.id IN (3) ORDER BY a.d_at LIMIT 1) as u LEFT JOIN user.ref_with_source AS u0 ON u.u_id = 
u0.u_uid ORDER BY u.id", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "SELECT u.id FROM ( SELECT a.id, a.u_id FROM user.ref_with_source AS a WHERE a.id IN (3) ORDER BY a.d_at LIMIT 1) as u LEFT JOIN user.ref_with_source AS u0 ON u.u_id = u0.u_uid ORDER BY u.id", @@ -208,6 +216,7 @@ { "comment": "insert into qualified ambiguous reference table routes to source", "query": "insert into user.ambiguous_ref_with_source(col) values(1)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into user.ambiguous_ref_with_source(col) values(1)", @@ -230,6 +239,7 @@ { "comment": "update unqualified ambiguous reference table routes to source", "query": "update ambiguous_ref_with_source set col = 1", + "skip_e2e": true, "plan": { "QueryType": "UPDATE", "Original": "update ambiguous_ref_with_source set col = 1", @@ -252,6 +262,7 @@ { "comment": "update qualified ambiguous reference table route to source", "query": "update user.ambiguous_ref_with_source set col = 1", + "skip_e2e": true, "plan": { "QueryType": "UPDATE", "Original": "update user.ambiguous_ref_with_source set col = 1", @@ -274,6 +285,7 @@ { "comment": "delete from unqualified ambiguous reference table routes to source", "query": "delete from ambiguous_ref_with_source where col = 1", + "skip_e2e": true, "plan": { "QueryType": "DELETE", "Original": "delete from ambiguous_ref_with_source where col = 1", @@ -296,6 +308,7 @@ { "comment": "delete from qualified ambiguous reference table route to source", "query": "delete from user.ambiguous_ref_with_source where col = 1", + "skip_e2e": true, "plan": { "QueryType": "DELETE", "Original": "delete from user.ambiguous_ref_with_source where col = 1", @@ -318,6 +331,7 @@ { "comment": "join with unqualified unambiguous ref with source routes to requested table", "query": "select user.col from user join ref_with_source", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ref_with_source", @@ -341,6 +355,7 @@ { "comment": "join with unqualified reference optimize routes when source & reference have different names", "query": "select user.col from user join source_of_ref", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select user.col from user join source_of_ref", @@ -364,6 +379,7 @@ { "comment": "join with unqualified reference respects routing rules", "query": "select user.col from user join rerouted_ref", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select user.col from user join rerouted_ref", @@ -387,6 +403,7 @@ { "comment": "join with reference to unqualified source routes to optimal keyspace", "query": "select user.col from user join global_ref", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select user.col from user join global_ref", @@ -410,6 +427,7 @@ { "comment": "insert into qualified reference with unqualified source routes to source", "query": "insert into user.global_ref(col) values(1)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into user.global_ref(col) values(1)", @@ -432,6 +450,7 @@ { "comment": "delete from reference table with another name - query send to source table", "query": "delete from user.ref_with_source where col = 1", + "skip_e2e": true, "plan": { "QueryType": "DELETE", "Original": "delete from user.ref_with_source where col = 1", @@ -454,6 +473,7 @@ { "comment": "update from reference table with another name - query send to source table", "query": "update user.ref_with_source set x = 4 where col = 1", + 
"skip_e2e": true, "plan": { "QueryType": "UPDATE", "Original": "update user.ref_with_source set x = 4 where col = 1", @@ -476,6 +496,7 @@ { "comment": "insert from reference table with another name - query send to source table", "query": "insert into user.ref_with_source(x) values(4)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into user.ref_with_source(x) values(4)", @@ -498,6 +519,7 @@ { "comment": "delete from reference table - query send to source table", "query": "delete from source_of_ref where col = 1", + "skip_e2e": true, "plan": { "QueryType": "DELETE", "Original": "delete from source_of_ref where col = 1", @@ -520,6 +542,7 @@ { "comment": "update from reference table - query send to source table", "query": "update source_of_ref set x = 4 where col = 1", + "skip_e2e": true, "plan": { "QueryType": "UPDATE", "Original": "update source_of_ref set x = 4 where col = 1", @@ -542,6 +565,7 @@ { "comment": "insert from reference table - query send to source table", "query": "insert into source_of_ref(x) values(4)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into source_of_ref(x) values(4)", @@ -564,6 +588,7 @@ { "comment": "delete from reference table qualified with unsharded - query send to source table", "query": "delete from main.source_of_ref where col = 1", + "skip_e2e": true, "plan": { "QueryType": "DELETE", "Original": "delete from main.source_of_ref where col = 1", @@ -586,6 +611,7 @@ { "comment": "update from reference table qualified with unsharded - query send to source table", "query": "update main.source_of_ref set x = 4 where col = 1", + "skip_e2e": true, "plan": { "QueryType": "UPDATE", "Original": "update main.source_of_ref set x = 4 where col = 1", @@ -608,6 +634,7 @@ { "comment": "insert from reference table qualified with unsharded - query send to source table", "query": "insert into main.source_of_ref(x) values(4)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into main.source_of_ref(x) values(4)", @@ -630,6 +657,7 @@ { "comment": "delete from reference table with another name - query send to source table", "query": "delete from user.ref_with_source where col = 1", + "skip_e2e": true, "plan": { "QueryType": "DELETE", "Original": "delete from user.ref_with_source where col = 1", @@ -652,6 +680,7 @@ { "comment": "update from reference table with another name - query send to source table", "query": "update user.ref_with_source set x = 4 where col = 1", + "skip_e2e": true, "plan": { "QueryType": "UPDATE", "Original": "update user.ref_with_source set x = 4 where col = 1", @@ -674,6 +703,7 @@ { "comment": "insert from reference table with another name - query send to source table", "query": "insert into user.ref_with_source(x) values(4)", + "skip_e2e": true, "plan": { "QueryType": "INSERT", "Original": "insert into user.ref_with_source(x) values(4)", @@ -696,6 +726,7 @@ { "comment": "select with join to reference table in sharded keyspace: should route shard-scoped", "query": "select * from user.ref_with_source ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select * from user.ref_with_source ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", @@ -723,6 +754,7 @@ { "comment": "select with join to reference table in unsharded keyspace: should route shard-scoped", "query": "select * from source_of_ref ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", + "skip_e2e": true, "plan": { "QueryType": 
"SELECT", "Original": "select * from source_of_ref ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", @@ -750,6 +782,7 @@ { "comment": "two sharded and two unsharded reference table join - all should be merged into one route", "query": "select 1 from user u join user_extra ue on u.id = ue.user_id join main.source_of_ref sr on sr.foo = ue.foo join main.rerouted_ref rr on rr.bar = sr.bar", + "skip_e2e": true, "plan": { "QueryType": "SELECT", "Original": "select 1 from user u join user_extra ue on u.id = ue.user_id join main.source_of_ref sr on sr.foo = ue.foo join main.rerouted_ref rr on rr.bar = sr.bar", @@ -771,5 +804,145 @@ "user.user_extra" ] } + }, + { + "comment": "update reference table with join on sharded table", + "query": "update main.source_of_ref as sr join main.rerouted_ref as rr on sr.id = rr.id inner join user.music as m on sr.col = m.col set sr.tt = 5 where m.user_id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update main.source_of_ref as sr join main.rerouted_ref as rr on sr.id = rr.id inner join user.music as m on sr.col = m.col set sr.tt = 5 where m.user_id = 1", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "m_col": 0 + }, + "TableName": "music_rerouted_ref, source_of_ref", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.col from music as m where 1 != 1", + "Query": "select m.col from music as m where m.user_id = 1 lock in share mode", + "Table": "music", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select sr.id from source_of_ref as sr, rerouted_ref as rr where 1 != 1", + "Query": "select sr.id from source_of_ref as sr, rerouted_ref as rr where sr.col = :m_col and sr.id = rr.id lock in share mode", + "Table": "rerouted_ref, source_of_ref" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update source_of_ref as sr set sr.tt = 5 where sr.id in ::dml_vals", + "Table": "source_of_ref" + } + ] + }, + "TablesUsed": [ + "main.rerouted_ref", + "main.source_of_ref", + "user.music" + ] + } + }, + { + "comment": "delete from reference table with join on sharded table", + "query": "delete sr from main.source_of_ref as sr join main.rerouted_ref as rr on sr.id = rr.id inner join user.music as m on sr.col = m.col where m.user_id = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete sr from main.source_of_ref as sr join main.rerouted_ref as rr on sr.id = rr.id inner join user.music as m on sr.col = m.col where m.user_id = 1", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "m_col": 0 + }, + "TableName": "music_rerouted_ref, source_of_ref", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.col from music as m where 1 != 1", + "Query": "select m.col from music as m where m.user_id = 1", + "Table": "music", + "Values": [ + "1" 
+ ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select sr.id from source_of_ref as sr, rerouted_ref as rr where 1 != 1", + "Query": "select sr.id from source_of_ref as sr, rerouted_ref as rr where sr.col = :m_col and sr.id = rr.id", + "Table": "rerouted_ref, source_of_ref" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from source_of_ref as sr where sr.id in ::dml_vals", + "Table": "source_of_ref" + } + ] + }, + "TablesUsed": [ + "main.rerouted_ref", + "main.source_of_ref", + "user.music" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/sampledata/user.sql b/go/vt/vtgate/planbuilder/testdata/sampledata/user.sql index 044a1ee140d..ff1afd68fca 100644 --- a/go/vt/vtgate/planbuilder/testdata/sampledata/user.sql +++ b/go/vt/vtgate/planbuilder/testdata/sampledata/user.sql @@ -11,4 +11,13 @@ INSERT INTO sales_extra(colx, cola, colb, start, end) VALUES (13, 'a_3', 'b_3',1000, 1500); INSERT INTO sales_extra(colx, cola, colb, start, end) -VALUES (14, 'a_4', 'b_4',1500, 2000); \ No newline at end of file +VALUES (14, 'a_4', 'b_4',1500, 2000); + +INSERT INTO music (id, user_id, col) +VALUES (100, 1, 'foo'); + +INSERT INTO source_of_ref (id, col, tt) +VALUES (200, 'foo', 2); + +INSERT INTO rerouted_ref (id, ref_col, name) +VALUES (200, 'bar', 'baz'); \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/main.sql b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql index fb03b69419b..e615871cf2b 100644 --- a/go/vt/vtgate/planbuilder/testdata/schemas/main.sql +++ b/go/vt/vtgate/planbuilder/testdata/schemas/main.sql @@ -1,26 +1,44 @@ -CREATE TABLE `unsharded` ( - `id` INT NOT NULL PRIMARY KEY, - `col` VARCHAR(255) DEFAULT NULL, - `col1` VARCHAR(255) DEFAULT NULL, - `col2` VARCHAR(255) DEFAULT NULL, - `name` VARCHAR(255) DEFAULT NULL, - `baz` INT +CREATE TABLE `unsharded` +( + `id` INT NOT NULL PRIMARY KEY, + `col` VARCHAR(255) DEFAULT NULL, + `col1` VARCHAR(255) DEFAULT NULL, + `col2` VARCHAR(255) DEFAULT NULL, + `name` VARCHAR(255) DEFAULT NULL, + `baz` INT ); -CREATE TABLE `unsharded_auto` ( +CREATE TABLE `unsharded_auto` +( `id` INT NOT NULL PRIMARY KEY, `col1` VARCHAR(255) DEFAULT NULL, `col2` VARCHAR(255) DEFAULT NULL ); -CREATE TABLE `unsharded_a` ( +CREATE TABLE `unsharded_a` +( `id` INT NOT NULL PRIMARY KEY, `col` VARCHAR(255) DEFAULT NULL, `name` VARCHAR(255) DEFAULT NULL ); -CREATE TABLE `unsharded_b` ( +CREATE TABLE `unsharded_b` +( `id` INT NOT NULL PRIMARY KEY, `col` VARCHAR(255) DEFAULT NULL, `name` VARCHAR(255) DEFAULT NULL +); + +CREATE TABLE `source_of_ref` +( + `id` INT NOT NULL PRIMARY KEY, + `col` VARCHAR(255) DEFAULT NULL, + `tt` BIGINT DEFAULT NULL +); + +CREATE TABLE `rerouted_ref` +( + `id` INT NOT NULL PRIMARY KEY, + `ref_col` VARCHAR(255) DEFAULT NULL, + `name` VARCHAR(255) DEFAULT NULL ); \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/schemas/user.sql b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql index 818d2508069..10c1886a992 100644 --- a/go/vt/vtgate/planbuilder/testdata/schemas/user.sql +++ b/go/vt/vtgate/planbuilder/testdata/schemas/user.sql @@ -1,25 +1,25 @@ CREATE TABLE user ( - id INT PRIMARY KEY, - col BIGINT, - intcol BIGINT, - user_id INT, - id1 INT, - id2 INT, - id3 INT, - m INT, - bar INT, - a INT, - name VARCHAR(255), - col1 VARCHAR(255), - col2 
VARCHAR(255), - costly VARCHAR(255), - predef1 VARCHAR(255), - predef2 VARCHAR(255), - textcol1 VARCHAR(255), - textcol2 VARCHAR(255), - someColumn VARCHAR(255), - foo VARCHAR(255) + id INT PRIMARY KEY, + col BIGINT, + intcol BIGINT, + user_id INT, + id1 INT, + id2 INT, + id3 INT, + m INT, + bar INT, + a INT, + name VARCHAR(255), + col1 VARCHAR(255), + col2 VARCHAR(255), + costly VARCHAR(255), + predef1 VARCHAR(255), + predef2 VARCHAR(255), + textcol1 VARCHAR(255), + textcol2 VARCHAR(255), + someColumn VARCHAR(255), + foo VARCHAR(255) ); CREATE TABLE user_metadata @@ -34,15 +34,23 @@ CREATE TABLE user_metadata CREATE TABLE music ( - user_id INT, - id INT, - col1 VARCHAR(255), - col2 VARCHAR(255), - genre VARCHAR(255), + user_id INT, + id INT, + col VARCHAR(255), + col1 VARCHAR(255), + col2 VARCHAR(255), + genre VARCHAR(255), componist VARCHAR(255), PRIMARY KEY (user_id) ); +CREATE TABLE name_user_vdx +( + name INT, + keyspace_id VARBINARY(10), + primary key (name) +); + CREATE TABLE samecolvin ( col VARCHAR(255), @@ -118,69 +126,63 @@ CREATE TABLE authoritative CREATE TABLE colb_colc_map ( - colb INT PRIMARY KEY, - colc INT, + colb INT PRIMARY KEY, + colc INT, keyspace_id VARCHAR(255) ); CREATE TABLE seq ( - id INT, - next_id BIGINT, - cache BIGINT, + id INT, + next_id BIGINT, + cache BIGINT, PRIMARY KEY (id) ) COMMENT 'vitess_sequence'; CREATE TABLE user_extra ( - id INT, - user_id INT, - extra_id INT, - col INT, - m2 INT, + id INT, + user_id INT, + extra_id INT, + col INT, + m2 INT, PRIMARY KEY (id, extra_id) ); CREATE TABLE name_user_map ( - name VARCHAR(255), - keyspace_id VARCHAR(255) -); - -CREATE TABLE name_user_vdx -( - name VARCHAR(255), - keyspace_id VARCHAR(255) + name VARCHAR(255), + keyspace_id VARCHAR(255) ); CREATE TABLE costly_map ( - costly VARCHAR(255), - keyspace_id VARCHAR(255) + costly VARCHAR(255), + keyspace_id VARCHAR(255) ); CREATE TABLE unq_binary_idx ( - id INT PRIMARY KEY, - col1 INT + id INT PRIMARY KEY, + col1 INT ); CREATE TABLE sales ( - oid INT PRIMARY KEY, - col1 VARCHAR(255) + oid INT PRIMARY KEY, + col1 VARCHAR(255) ); CREATE TABLE sales_extra ( - colx INT PRIMARY KEY, - cola VARCHAR(255), - colb VARCHAR(255), - start INT, - end INT + colx INT PRIMARY KEY, + cola VARCHAR(255), + colb VARCHAR(255), + start INT, + end INT ); CREATE TABLE ref ( - col INT PRIMARY KEY + col INT PRIMARY KEY ); \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.json b/go/vt/vtgate/planbuilder/testdata/set_cases.json index 58cb2fffa75..02c5603a03c 100644 --- a/go/vt/vtgate/planbuilder/testdata/set_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/set_cases.json @@ -605,5 +605,28 @@ ] } } + }, + { + "comment": "set last_insert_id with argument to user defined variable", + "query": "set @foo = last_insert_id(1)", + "plan": { + "QueryType": "SET", + "Original": "set @foo = last_insert_id(1)", + "Instructions": { + "OperatorType": "Set", + "Ops": [ + { + "Type": "UserDefinedVariable", + "Name": "foo", + "Expr": "last_insert_id(1)" + } + ], + "Inputs": [ + { + "OperatorType": "SingleRow" + } + ] + } + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json index 9241cec595c..55329586b0e 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json @@ -342,7 +342,12 @@ { "comment": "reference table delete with join", "query": "delete r from user u join ref_with_source r on u.col = r.col", - "plan":
"VT12001: unsupported: DELETE on reference table with join" + "plan": "VT12001: unsupported: DML on reference table with join" + }, + { + "comment": "reference table update with join", + "query": "update user u join ref_with_source r on u.col = r.col set r.col = 5", + "plan": "VT12001: unsupported: DML on reference table with join" }, { "comment": "group_concat unsupported when needs full evaluation at vtgate with more than 1 column", diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index edba48a9151..85f236a9a18 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -685,7 +685,7 @@ func (stc *ScatterConn) multiGoTransaction( startTime, statsKey := stc.startAction(name, rs.Target) defer stc.endAction(startTime, allErrors, statsKey, &err, session) - info, shardSession, err := actionInfo(ctx, rs.Target, session, autocommit, stc.txConn.mode) + info, shardSession, err := actionInfo(ctx, rs.Target, session, autocommit, stc.txConn.txMode.TransactionMode()) if err != nil { return } @@ -702,7 +702,7 @@ func (stc *ScatterConn) multiGoTransaction( shardSession.RowsAffected = info.rowsAffected } if info.actionNeeded != nothing && (info.transactionID != 0 || info.reservedID != 0) { - appendErr := session.AppendOrUpdate(rs.Target, info, shardSession, stc.txConn.mode) + appendErr := session.AppendOrUpdate(rs.Target, info, shardSession, stc.txConn.txMode.TransactionMode()) if appendErr != nil { err = appendErr } diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go index 988932f4414..62cdc019ddf 100644 --- a/go/vt/vtgate/semantics/analyzer.go +++ b/go/vt/vtgate/semantics/analyzer.go @@ -174,7 +174,7 @@ func (a *analyzer) newSemTable( Direct: a.binder.direct, ExprTypes: a.typer.m, Tables: a.tables.Tables, - Targets: a.binder.targets, + DMLTargets: a.binder.targets, NotSingleRouteErr: a.notSingleRouteErr, NotUnshardedErr: a.unshardedErr, Warning: a.warning, diff --git a/go/vt/vtgate/semantics/semantic_table.go b/go/vt/vtgate/semantics/semantic_table.go index 492259427c5..30a41ba5f12 100644 --- a/go/vt/vtgate/semantics/semantic_table.go +++ b/go/vt/vtgate/semantics/semantic_table.go @@ -130,8 +130,8 @@ type ( // It doesn't recurse inside derived tables to find the original dependencies. Direct ExprDependencies - // Targets contains the TableSet of each table getting modified by the update/delete statement. - Targets TableSet + // DMLTargets contains the TableSet of each table getting modified by the update/delete statement. + DMLTargets TableSet // ColumnEqualities is used for transitive closures (e.g., if a == b and b == c, then a == c). ColumnEqualities map[columnName][]sqlparser.Expr @@ -203,7 +203,7 @@ func (st *SemTable) CopyDependencies(from, to sqlparser.Expr) { // GetChildForeignKeysForTargets gets the child foreign keys as a list for all the target tables. func (st *SemTable) GetChildForeignKeysForTargets() (fks []vindexes.ChildFKInfo) { - for _, ts := range st.Targets.Constituents() { + for _, ts := range st.DMLTargets.Constituents() { fks = append(fks, st.childForeignKeysInvolved[ts]...) } return fks @@ -211,7 +211,7 @@ func (st *SemTable) GetChildForeignKeysForTargets() (fks []vindexes.ChildFKInfo) // GetChildForeignKeysForTableSet gets the child foreign keys as a listfor the TableSet. 
func (st *SemTable) GetChildForeignKeysForTableSet(target TableSet) (fks []vindexes.ChildFKInfo) { - for _, ts := range st.Targets.Constituents() { + for _, ts := range st.DMLTargets.Constituents() { if target.IsSolvedBy(ts) { fks = append(fks, st.childForeignKeysInvolved[ts]...) } @@ -239,7 +239,7 @@ func (st *SemTable) GetChildForeignKeysList() []vindexes.ChildFKInfo { // GetParentForeignKeysForTargets gets the parent foreign keys as a list for all the target tables. func (st *SemTable) GetParentForeignKeysForTargets() (fks []vindexes.ParentFKInfo) { - for _, ts := range st.Targets.Constituents() { + for _, ts := range st.DMLTargets.Constituents() { fks = append(fks, st.parentForeignKeysInvolved[ts]...) } return fks @@ -247,7 +247,7 @@ func (st *SemTable) GetParentForeignKeysForTargets() (fks []vindexes.ParentFKInf // GetParentForeignKeysForTableSet gets the parent foreign keys as a list for the TableSet. func (st *SemTable) GetParentForeignKeysForTableSet(target TableSet) (fks []vindexes.ParentFKInfo) { - for _, ts := range st.Targets.Constituents() { + for _, ts := range st.DMLTargets.Constituents() { if target.IsSolvedBy(ts) { fks = append(fks, st.parentForeignKeysInvolved[ts]...) } @@ -971,7 +971,7 @@ func (st *SemTable) UpdateChildFKExpr(origUpdExpr *sqlparser.UpdateExpr, newExpr // GetTargetTableSetForTableName returns the TableSet for the given table name from the target tables. func (st *SemTable) GetTargetTableSetForTableName(name sqlparser.TableName) (TableSet, error) { - for _, target := range st.Targets.Constituents() { + for _, target := range st.DMLTargets.Constituents() { tbl, err := st.Tables[target.TableOffset()].Name() if err != nil { return "", err diff --git a/go/vt/vtgate/static_config.go b/go/vt/vtgate/static_config.go new file mode 100644 index 00000000000..f78545ebc5b --- /dev/null +++ b/go/vt/vtgate/static_config.go @@ -0,0 +1,40 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + +// StaticConfig is a static configuration for vtgate. +// It is used for tests and vtexplain_vtgate where we don't want the user to +// control certain configs. 
+type StaticConfig struct { + OnlineDDLEnabled bool + DirectDDLEnabled bool + TxMode vtgatepb.TransactionMode +} + +func (s *StaticConfig) OnlineEnabled() bool { + return s.OnlineDDLEnabled +} + +func (s *StaticConfig) DirectEnabled() bool { + return s.DirectDDLEnabled +} + +func (s *StaticConfig) TransactionMode() vtgatepb.TransactionMode { + return s.TxMode +} diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go index cadb1392eca..dbd76b04c7a 100644 --- a/go/vt/vtgate/tx_conn.go +++ b/go/vt/vtgate/tx_conn.go @@ -33,6 +33,7 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/dynamicconfig" econtext "vitess.io/vitess/go/vt/vtgate/executorcontext" "vitess.io/vitess/go/vt/vttablet/queryservice" ) @@ -44,14 +45,14 @@ const nonAtomicCommitWarnMaxShards = 16 // TxConn is used for executing transactional requests. type TxConn struct { tabletGateway *TabletGateway - mode vtgatepb.TransactionMode + txMode dynamicconfig.TxMode } // NewTxConn builds a new TxConn. -func NewTxConn(gw *TabletGateway, txMode vtgatepb.TransactionMode) *TxConn { +func NewTxConn(gw *TabletGateway, txMode dynamicconfig.TxMode) *TxConn { return &TxConn{ tabletGateway: gw, - mode: txMode, + txMode: txMode, } } @@ -114,7 +115,7 @@ func (txc *TxConn) Commit(ctx context.Context, session *econtext.SafeSession) er case vtgatepb.TransactionMode_TWOPC: twopc = true case vtgatepb.TransactionMode_UNSPECIFIED: - twopc = txc.mode == vtgatepb.TransactionMode_TWOPC + twopc = txc.txMode.TransactionMode() == vtgatepb.TransactionMode_TWOPC } defer recordCommitTime(session, twopc, time.Now()) diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index d96f0b8fccf..6d31aa4e543 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -72,7 +72,7 @@ func TestTxConnCommitFailure(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbcs, rssm, rssa := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 3) - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"] // Sequence the executes to ensure commit order @@ -173,7 +173,7 @@ func TestTxConnCommitFailureAfterNonAtomicCommitMaxShards(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbcs, rssm, _ := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 18) - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"] // Sequence the executes to ensure commit order @@ -227,7 +227,7 @@ func TestTxConnCommitFailureBeforeNonAtomicCommitMaxShards(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbcs, rssm, _ := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 17) - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"] // Sequence the executes to ensure commit order @@ -281,7 +281,7 @@ func TestTxConnCommitSuccess(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} // Sequence the executes to ensure commit order session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true}) @@ 
-334,7 +334,7 @@ func TestTxConnReservedCommitSuccess(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} // Sequence the executes to ensure commit order session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true}) @@ -419,7 +419,7 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) { keyspace := "TestTxConn" sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace) - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} // Sequence the executes to ensure shard session order session := econtext.NewSafeSession(&vtgatepb.Session{InReservedConn: true}) @@ -514,7 +514,7 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) { keyspace := "TestTxConn" sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace) - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} // Sequence the executes to ensure shard session order session := econtext.NewSafeSession(&vtgatepb.Session{InReservedConn: true}) @@ -608,7 +608,7 @@ func TestTxConnCommitOrderFailure1(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} queries := []*querypb.BoundQuery{{Sql: "query1"}} @@ -641,7 +641,7 @@ func TestTxConnCommitOrderFailure2(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} queries := []*querypb.BoundQuery{{ Sql: "query1", @@ -675,7 +675,7 @@ func TestTxConnCommitOrderFailure3(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} queries := []*querypb.BoundQuery{{ Sql: "query1", @@ -717,7 +717,7 @@ func TestTxConnCommitOrderSuccess(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} queries := []*querypb.BoundQuery{{ Sql: "query1", @@ -815,7 +815,7 @@ func TestTxConnReservedCommitOrderSuccess(t *testing.T) { ctx := utils.LeakCheckContext(t) sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") - sc.txConn.mode = vtgatepb.TransactionMode_MULTI + sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI} queries := []*querypb.BoundQuery{{ Sql: "query1", diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index 3852bbfcde3..278dd6932ae 100644 --- a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -25,10 +25,9 @@ import ( "strings" "time" - "vitess.io/vitess/go/ptr" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/ptr" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" querypb 
"vitess.io/vitess/go/vt/proto/query" @@ -473,6 +472,40 @@ func buildGlobalTables(source *vschemapb.SrvVSchema, vschema *VSchema) { } } +// AddAdditionalGlobalTables adds unique tables from unsharded keyspaces to the global tables. +// It is expected to be called from the schema tracking code. Note that this is called after `BuildVSchema` +// which means that the global tables are already populated with the tables from the sharded keyspaces and from +// unsharded keyspaces which have tables specified in associated vschemas. +func AddAdditionalGlobalTables(source *vschemapb.SrvVSchema, vschema *VSchema) { + newTables := make(map[string]*Table) + + // Collect valid uniquely named tables from unsharded keyspaces. + for ksname, ks := range source.Keyspaces { + ksvschema := vschema.Keyspaces[ksname] + // Ignore sharded keyspaces and those flagged for explicit routing. + if ks.RequireExplicitRouting || ks.Sharded { + continue + } + for tname, table := range ksvschema.Tables { + // Ignore tables already global (i.e. if specified in the vschema of an unsharded keyspace) or ambiguous. + if _, found := vschema.globalTables[tname]; !found { + _, ok := newTables[tname] + if !ok { + table.Keyspace = ksvschema.Keyspace + newTables[tname] = table + } else { + newTables[tname] = nil + } + } + } + } + + // Mark new tables found just once as globally routable, rest as ambiguous. + for k, v := range newTables { + vschema.globalTables[k] = v + } +} + func buildKeyspaceGlobalTables(vschema *VSchema, ksvschema *KeyspaceSchema) { for tname, t := range ksvschema.Tables { if gt, ok := vschema.globalTables[tname]; ok { diff --git a/go/vt/vtgate/vindexes/vschema_routing_test.go b/go/vt/vtgate/vindexes/vschema_routing_test.go new file mode 100644 index 00000000000..48ac9239fbb --- /dev/null +++ b/go/vt/vtgate/vindexes/vschema_routing_test.go @@ -0,0 +1,500 @@ +/* +Copyright 2025 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "encoding/json" + "errors" + "sort" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" +) + +// TestAutoGlobalRoutingExtended tests the global routing of tables across various keyspace configurations, +// including unsharded and sharded keyspaces, with and without the RequireExplicitRouting flag. 
+func TestAutoGlobalRoutingExtended(t *testing.T) { + isTableGloballyRoutable := func(vschema *VSchema, tableName string) (isGlobal, isAmbiguous bool) { + table, err := vschema.FindTable("", tableName) + if err != nil { + if strings.Contains(err.Error(), "ambiguous") { + return false, true + } + return false, false + } + return table != nil, false + } + type testKeySpace struct { + name string + ks *vschemapb.Keyspace + } + unsharded1 := &testKeySpace{ + name: "unsharded1", + ks: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "table1": {}, + "table2": {}, + "scommon1": {}, + "ucommon3": {}, + }, + }, + } + unsharded2 := &testKeySpace{ + name: "unsharded2", + ks: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "table3": {}, + "table4": {}, + "scommon1": {}, + "scommon2": {}, + "ucommon3": {}, + }, + }, + } + sharded1 := &testKeySpace{ + name: "sharded1", + ks: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "table5": {}, + "scommon1": {}, + "scommon2": {}, + }, + }, + } + sharded2 := &testKeySpace{ + name: "sharded2", + ks: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "table6": {}, + "scommon2": {}, + "scommon3": {}, + }, + }, + } + for _, tables := range []*vschemapb.Keyspace{sharded1.ks, sharded2.ks} { + for _, t := range tables.Tables { + t.ColumnVindexes = append(t.ColumnVindexes, &vschemapb.ColumnVindex{ + Column: "c1", + Name: "xxhash", + }) + } + } + type testCase struct { + name string + keyspaces []*testKeySpace + expGlobalTables []string + expAmbiguousTables []string + explicit []string + } + testCases := []testCase{ + { + name: "no keyspaces", + keyspaces: []*testKeySpace{}, + expGlobalTables: nil, + expAmbiguousTables: nil, + }, + { + name: "one unsharded keyspace", + keyspaces: []*testKeySpace{unsharded1}, + expGlobalTables: []string{"table1", "table2", "scommon1", "ucommon3"}, + expAmbiguousTables: nil, + }, + { + name: "two unsharded keyspaces", + keyspaces: []*testKeySpace{unsharded1, unsharded2}, + expGlobalTables: []string{"table1", "table2", "table3", "table4", "scommon2"}, + expAmbiguousTables: []string{"scommon1", "ucommon3"}, + }, + { + name: "two unsharded keyspaces, one with RequireExplicitRouting", + keyspaces: []*testKeySpace{unsharded1, unsharded2}, + explicit: []string{"unsharded1"}, + expGlobalTables: []string{"table3", "table4", "scommon1", "scommon2", "ucommon3"}, + }, + { + name: "one sharded keyspace", + keyspaces: []*testKeySpace{sharded1}, + expGlobalTables: []string{"table5", "scommon1", "scommon2"}, + }, + { + name: "two sharded keyspaces", + keyspaces: []*testKeySpace{sharded1, sharded2}, + expGlobalTables: []string{"table5", "table6", "scommon1", "scommon3"}, + expAmbiguousTables: []string{"scommon2"}, + }, + { + name: "two sharded keyspaces, one with RequireExplicitRouting", + keyspaces: []*testKeySpace{sharded1, sharded2}, + explicit: []string{"sharded2"}, + expGlobalTables: []string{"table5", "scommon1", "scommon2"}, + }, + { + name: "two sharded keyspaces, both with RequireExplicitRouting", + keyspaces: []*testKeySpace{sharded1, sharded2}, + explicit: []string{"sharded1", "sharded2"}, + expGlobalTables: nil, + }, + { + name: "two sharded keyspaces, one unsharded keyspace", + keyspaces: []*testKeySpace{sharded1, sharded2, unsharded1}, + expGlobalTables: []string{"table1", "table2", "table5", 
"table6", "scommon3", "ucommon3"}, + expAmbiguousTables: []string{"scommon1", "scommon2"}, + }, + { + name: "two sharded keyspaces, one unsharded keyspace, one with RequireExplicitRouting", + keyspaces: []*testKeySpace{sharded1, sharded2, unsharded1, unsharded2}, + explicit: []string{"unsharded1"}, + expGlobalTables: []string{"table3", "table4", "table5", "table6", "scommon3", "ucommon3"}, + expAmbiguousTables: []string{"scommon1", "scommon2"}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + allTables := make(map[string]bool) + source := &vschemapb.SrvVSchema{ + Keyspaces: make(map[string]*vschemapb.Keyspace), + } + for _, ks := range tc.keyspaces { + source.Keyspaces[ks.name] = ks.ks + // set it false for all keyspaces here and later override for those requested in the test case + ks.ks.RequireExplicitRouting = false + for tname := range ks.ks.Tables { + _, ok := allTables[tname] + if !ok { + allTables[tname] = true + } + } + } + for _, ksName := range tc.explicit { + source.Keyspaces[ksName].RequireExplicitRouting = true + } + vschema := BuildVSchema(source, sqlparser.NewTestParser()) + require.NotNil(t, vschema) + AddAdditionalGlobalTables(source, vschema) + + var globalTables, ambiguousTables []string + for tname := range allTables { + isGlobal, isAmbiguous := isTableGloballyRoutable(vschema, tname) + if isGlobal { + globalTables = append(globalTables, tname) + } else if isAmbiguous { + ambiguousTables = append(ambiguousTables, tname) + } + } + sort.Strings(globalTables) + sort.Strings(ambiguousTables) + sort.Strings(tc.expGlobalTables) + sort.Strings(tc.expAmbiguousTables) + require.EqualValuesf(t, tc.expGlobalTables, globalTables, "global tables mismatch") + require.EqualValuesf(t, tc.expAmbiguousTables, ambiguousTables, "ambiguous tables mismatch") + }) + } +} + +// TestAutoGlobalRouting tests adding tables in unsharded keyspaces to global routing if they don't have +// an associated VSchema which has the RequireExplicitRouting flag set. These tables should also not be +// already part of the global routing tables via the VSchema of sharded keyspaces. +func TestAutoGlobalRoutingBasic(t *testing.T) { + // Create two unsharded keyspaces and two sharded keyspaces, each with some common tables. 
+ unsharded1 := &vschemapb.Keyspace{
+ Tables: map[string]*vschemapb.Table{
+ "table1": {}, // unique, should be added to global routing
+ "table2": {}, // unique, should be added to global routing
+ "scommon1": {}, // common with sharded1, should not be added to global routing because it is already globally routable via sharded1
+ "ucommon3": {}, // common with unsharded2, should not be added to global routing because it is ambiguous with unsharded2
+ },
+ }
+ unsharded2 := &vschemapb.Keyspace{
+ Tables: map[string]*vschemapb.Table{
+ "table3": {}, // unique, should be added to global routing
+ "table4": {}, // unique, should be added to global routing
+ "scommon1": {}, // common with sharded1, should not be added to global routing because it is already globally routable via sharded1
+ "scommon2": {}, // common with sharded1, should not be added to global routing because it is already globally routable via sharded1
+ "ucommon3": {}, // common with unsharded1, should not be added to global routing because it is ambiguous with unsharded1
+ },
+ }
+ sharded1 := &vschemapb.Keyspace{
+ Sharded: true,
+ Vindexes: map[string]*vschemapb.Vindex{
+ "xxhash": {
+ Type: "xxhash",
+ },
+ },
+ Tables: map[string]*vschemapb.Table{
+ "table5": {}, // unique, should be added to global routing
+ "scommon1": {}, // common with unsharded1 and unsharded2, should be added to global routing because it is in a sharded keyspace's VSchema
+ "scommon2": {}, // common with unsharded2, not ambiguous because sharded2 sets RequireExplicitRouting
+ },
+ }
+ sharded2 := &vschemapb.Keyspace{
+ Sharded: true,
+ RequireExplicitRouting: true,
+ Vindexes: map[string]*vschemapb.Vindex{
+ "xxhash": {
+ Type: "xxhash",
+ },
+ },
+ // none of these should be considered as global or ambiguous because RequireExplicitRouting is set
+ Tables: map[string]*vschemapb.Table{
+ "table6": {}, // unique
+ "scommon2": {}, // common with sharded1, but has RequireExplicitRouting
+ "scommon3": {}, // unique
+ },
+ }
+ for _, ks := range []*vschemapb.Keyspace{sharded1, sharded2} {
+ for _, t := range ks.Tables {
+ t.ColumnVindexes = append(t.ColumnVindexes, &vschemapb.ColumnVindex{
+ Column: "c1",
+ Name: "xxhash",
+ })
+ }
+ }
+ source := &vschemapb.SrvVSchema{
+ Keyspaces: map[string]*vschemapb.Keyspace{
+ "sharded1": sharded1,
+ "sharded2": sharded2,
+ },
+ }
+
+ vschema := BuildVSchema(source, sqlparser.NewTestParser())
+ require.NotNil(t, vschema)
+
+ // Check that a table is globally routable
+ mustRouteGlobally := func(t *testing.T, tname, ksName string) {
+ t.Helper()
+ _, err := vschema.FindTable("", tname)
+ require.NoError(t, err)
+ // The vtgate data structures don't always set the keyspace name, so we cannot reliably check that at the moment.
+ _ = ksName + } + + mustNotRouteGlobally := func(t *testing.T, tname string) { + t.Helper() + _, err := vschema.FindTable("", tname) + require.Error(t, err) + } + + // Verify the global tables + ks := vschema.Keyspaces["sharded1"] + require.EqualValues(t, vschema.globalTables, map[string]*Table{ + "table5": ks.Tables["table5"], + "scommon1": ks.Tables["scommon1"], + "scommon2": ks.Tables["scommon2"], + }) + mustRouteGlobally(t, "table5", "sharded1") + mustRouteGlobally(t, "scommon1", "sharded1") + mustRouteGlobally(t, "scommon2", "sharded1") + + // Add unsharded keyspaces to SrvVSchema and build VSchema + var err error + source.Keyspaces["unsharded1"] = unsharded1 + source.Keyspaces["unsharded2"] = unsharded2 + vschema.Keyspaces["unsharded1"], err = BuildKeyspace(unsharded1, sqlparser.NewTestParser()) + require.NoError(t, err) + vschema.Keyspaces["unsharded2"], err = BuildKeyspace(unsharded2, sqlparser.NewTestParser()) + require.NoError(t, err) + + // Verify the global tables don't change + mustRouteGlobally(t, "table5", "sharded1") + mustRouteGlobally(t, "scommon1", "sharded1") + mustRouteGlobally(t, "scommon2", "sharded1") + + // Add additional global tables and then verify that the unsharded global tables are added + AddAdditionalGlobalTables(source, vschema) + + mustRouteGlobally(t, "table1", "unsharded1") + mustRouteGlobally(t, "table2", "unsharded1") + + mustRouteGlobally(t, "table3", "unsharded2") + mustRouteGlobally(t, "table4", "unsharded2") + mustNotRouteGlobally(t, "ucommon3") + + mustRouteGlobally(t, "scommon1", "sharded1") + mustRouteGlobally(t, "scommon2", "sharded1") + mustRouteGlobally(t, "table5", "sharded1") + + mustNotRouteGlobally(t, "table6") + mustNotRouteGlobally(t, "scommon3") +} + +func TestVSchemaRoutingRules(t *testing.T) { + input := vschemapb.SrvVSchema{ + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{{ + FromTable: "rt1", + ToTables: []string{"ks1.t1", "ks2.t2"}, + }, { + FromTable: "rt2", + ToTables: []string{"ks2.t2"}, + }, { + FromTable: "escaped", + ToTables: []string{"`ks2`.`t2`"}, + }, { + FromTable: "dup", + ToTables: []string{"ks1.t1"}, + }, { + FromTable: "dup", + ToTables: []string{"ks1.t1"}, + }, { + FromTable: "badname", + ToTables: []string{"t1.t2.t3"}, + }, { + FromTable: "unqualified", + ToTables: []string{"t1"}, + }, { + FromTable: "badkeyspace", + ToTables: []string{"ks3.t1"}, + }, { + FromTable: "notfound", + ToTables: []string{"ks1.t2"}, + }}, + }, + Keyspaces: map[string]*vschemapb.Keyspace{ + "ks1": { + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Vindexes: map[string]*vschemapb.Vindex{ + "stfu1": { + Type: "stfu", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c1", + Name: "stfu1", + }, + }, + }, + }, + }, + "ks2": { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vschemapb.Table{ + "t2": {}, + }, + }, + }, + } + got := BuildVSchema(&input, sqlparser.NewTestParser()) + ks1 := &Keyspace{ + Name: "ks1", + Sharded: true, + } + ks2 := &Keyspace{ + Name: "ks2", + } + vindex1 := &stFU{ + name: "stfu1", + } + t1 := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: ks1, + ColumnVindexes: []*ColumnVindex{{ + Columns: []sqlparser.IdentifierCI{sqlparser.NewIdentifierCI("c1")}, + Type: "stfu", + Name: "stfu1", + Vindex: vindex1, + isUnique: vindex1.IsUnique(), + cost: vindex1.Cost(), + }}, + } + t1.Ordered = []*ColumnVindex{ + t1.ColumnVindexes[0], + } + t2 := &Table{ + Name: 
sqlparser.NewIdentifierCS("t2"), + Keyspace: ks2, + } + want := &VSchema{ + MirrorRules: map[string]*MirrorRule{}, + RoutingRules: map[string]*RoutingRule{ + "rt1": { + Error: errors.New("table rt1 has more than one target: [ks1.t1 ks2.t2]"), + }, + "rt2": { + Tables: []*Table{t2}, + }, + "escaped": { + Tables: []*Table{t2}, + }, + "dup": { + Error: errors.New("duplicate rule for entry dup"), + }, + "badname": { + Error: errors.New("invalid table name: 't1.t2.t3', it must be of the qualified form . (dots are not allowed in either name)"), + }, + "unqualified": { + Error: errors.New("invalid table name: 't1', it must be of the qualified form . (dots are not allowed in either name)"), + }, + "badkeyspace": { + Error: errors.New("VT05003: unknown database 'ks3' in vschema"), + }, + "notfound": { + Error: errors.New("table t2 not found"), + }, + }, + globalTables: map[string]*Table{ + "t1": t1, + "t2": t2, + }, + uniqueVindexes: map[string]Vindex{ + "stfu1": vindex1, + }, + Keyspaces: map[string]*KeyspaceSchema{ + "ks1": { + Keyspace: ks1, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Tables: map[string]*Table{ + "t1": t1, + }, + Vindexes: map[string]Vindex{ + "stfu1": vindex1, + }, + }, + "ks2": { + ForeignKeyMode: vschemapb.Keyspace_managed, + Keyspace: ks2, + Tables: map[string]*Table{ + "t2": t2, + }, + Vindexes: map[string]Vindex{}, + }, + }, + } + gotb, _ := json.MarshalIndent(got, "", " ") + wantb, _ := json.MarshalIndent(want, "", " ") + assert.Equal(t, string(wantb), string(gotb), string(gotb)) +} diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index f9bcf43ddaa..70e70745e9b 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -748,157 +748,6 @@ func TestShardedVSchemaOwnerInfo(t *testing.T) { } } -func TestVSchemaRoutingRules(t *testing.T) { - input := vschemapb.SrvVSchema{ - RoutingRules: &vschemapb.RoutingRules{ - Rules: []*vschemapb.RoutingRule{{ - FromTable: "rt1", - ToTables: []string{"ks1.t1", "ks2.t2"}, - }, { - FromTable: "rt2", - ToTables: []string{"ks2.t2"}, - }, { - FromTable: "escaped", - ToTables: []string{"`ks2`.`t2`"}, - }, { - FromTable: "dup", - ToTables: []string{"ks1.t1"}, - }, { - FromTable: "dup", - ToTables: []string{"ks1.t1"}, - }, { - FromTable: "badname", - ToTables: []string{"t1.t2.t3"}, - }, { - FromTable: "unqualified", - ToTables: []string{"t1"}, - }, { - FromTable: "badkeyspace", - ToTables: []string{"ks3.t1"}, - }, { - FromTable: "notfound", - ToTables: []string{"ks1.t2"}, - }}, - }, - Keyspaces: map[string]*vschemapb.Keyspace{ - "ks1": { - Sharded: true, - ForeignKeyMode: vschemapb.Keyspace_unmanaged, - Vindexes: map[string]*vschemapb.Vindex{ - "stfu1": { - Type: "stfu", - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{ - { - Column: "c1", - Name: "stfu1", - }, - }, - }, - }, - }, - "ks2": { - ForeignKeyMode: vschemapb.Keyspace_managed, - Tables: map[string]*vschemapb.Table{ - "t2": {}, - }, - }, - }, - } - got := BuildVSchema(&input, sqlparser.NewTestParser()) - ks1 := &Keyspace{ - Name: "ks1", - Sharded: true, - } - ks2 := &Keyspace{ - Name: "ks2", - } - vindex1 := &stFU{ - name: "stfu1", - } - t1 := &Table{ - Name: sqlparser.NewIdentifierCS("t1"), - Keyspace: ks1, - ColumnVindexes: []*ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{sqlparser.NewIdentifierCI("c1")}, - Type: "stfu", - Name: "stfu1", - Vindex: vindex1, - isUnique: vindex1.IsUnique(), - cost: vindex1.Cost(), - }}, - } - t1.Ordered = 
[]*ColumnVindex{ - t1.ColumnVindexes[0], - } - t2 := &Table{ - Name: sqlparser.NewIdentifierCS("t2"), - Keyspace: ks2, - } - want := &VSchema{ - MirrorRules: map[string]*MirrorRule{}, - RoutingRules: map[string]*RoutingRule{ - "rt1": { - Error: errors.New("table rt1 has more than one target: [ks1.t1 ks2.t2]"), - }, - "rt2": { - Tables: []*Table{t2}, - }, - "escaped": { - Tables: []*Table{t2}, - }, - "dup": { - Error: errors.New("duplicate rule for entry dup"), - }, - "badname": { - Error: errors.New("invalid table name: 't1.t2.t3', it must be of the qualified form . (dots are not allowed in either name)"), - }, - "unqualified": { - Error: errors.New("invalid table name: 't1', it must be of the qualified form . (dots are not allowed in either name)"), - }, - "badkeyspace": { - Error: errors.New("VT05003: unknown database 'ks3' in vschema"), - }, - "notfound": { - Error: errors.New("table t2 not found"), - }, - }, - globalTables: map[string]*Table{ - "t1": t1, - "t2": t2, - }, - uniqueVindexes: map[string]Vindex{ - "stfu1": vindex1, - }, - Keyspaces: map[string]*KeyspaceSchema{ - "ks1": { - Keyspace: ks1, - ForeignKeyMode: vschemapb.Keyspace_unmanaged, - Tables: map[string]*Table{ - "t1": t1, - }, - Vindexes: map[string]Vindex{ - "stfu1": vindex1, - }, - }, - "ks2": { - ForeignKeyMode: vschemapb.Keyspace_managed, - Keyspace: ks2, - Tables: map[string]*Table{ - "t2": t2, - }, - Vindexes: map[string]Vindex{}, - }, - }, - } - gotb, _ := json.MarshalIndent(got, "", " ") - wantb, _ := json.MarshalIndent(want, "", " ") - assert.Equal(t, string(wantb), string(gotb), string(gotb)) -} - func TestVSchemaMirrorRules(t *testing.T) { input := vschemapb.SrvVSchema{ MirrorRules: &vschemapb.MirrorRules{ diff --git a/go/vt/vtgate/viper_config.go b/go/vt/vtgate/viper_config.go new file mode 100644 index 00000000000..68430b7be2c --- /dev/null +++ b/go/vt/vtgate/viper_config.go @@ -0,0 +1,50 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import ( + "vitess.io/vitess/go/viperutil" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" +) + +// DynamicViperConfig is a dynamic config that uses viper. 
+type DynamicViperConfig struct { + onlineDDL viperutil.Value[bool] + directDDL viperutil.Value[bool] + txMode viperutil.Value[vtgatepb.TransactionMode] +} + +// NewDynamicViperConfig creates a new dynamic viper config +func NewDynamicViperConfig() *DynamicViperConfig { + return &DynamicViperConfig{ + onlineDDL: enableOnlineDDL, + directDDL: enableDirectDDL, + txMode: transactionMode, + } +} + +func (d *DynamicViperConfig) OnlineEnabled() bool { + return d.onlineDDL.Get() +} + +func (d *DynamicViperConfig) DirectEnabled() bool { + return d.directDDL.Get() +} + +func (d *DynamicViperConfig) TransactionMode() vtgatepb.TransactionMode { + return d.txMode.Get() +} diff --git a/go/vt/vtgate/viperconfig.go b/go/vt/vtgate/viperconfig.go deleted file mode 100644 index ec77ff62d4f..00000000000 --- a/go/vt/vtgate/viperconfig.go +++ /dev/null @@ -1,16 +0,0 @@ -package vtgate - -import "vitess.io/vitess/go/viperutil" - -type dynamicViperConfig struct { - onlineDDL viperutil.Value[bool] - directDDL viperutil.Value[bool] -} - -func (d *dynamicViperConfig) OnlineEnabled() bool { - return d.onlineDDL.Get() -} - -func (d *dynamicViperConfig) DirectEnabled() bool { - return d.directDDL.Get() -} diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index 62ea2cd3455..290971c45ed 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -194,6 +194,10 @@ func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vinde // We mark the keyspaces that have foreign key management in Vitess and have cyclic foreign keys // to have an error. This makes all queries against them to fail. markErrorIfCyclesInFk(vschema) + // Add tables from schema tracking into globally routable tables, if they are not already present. 
+ // We need to skip if already present, to handle the case where MoveTables has switched traffic + // and removed the source vschema but not from the source database because user asked to --keep-data + vindexes.AddAdditionalGlobalTables(v, vschema) } return vschema } diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 8bab05479dd..a1dcd3219f6 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -29,6 +29,7 @@ import ( "time" "github.com/spf13/pflag" + "github.com/spf13/viper" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/sqltypes" @@ -60,7 +61,6 @@ import ( ) var ( - transactionMode = "MULTI" normalizeQueries = true streamBufferSize = 32 * 1024 @@ -114,6 +114,33 @@ var ( }, ) + transactionMode = viperutil.Configure( + "transaction_mode", + viperutil.Options[vtgatepb.TransactionMode]{ + FlagName: "transaction_mode", + Default: vtgatepb.TransactionMode_MULTI, + Dynamic: true, + GetFunc: func(v *viper.Viper) func(key string) vtgatepb.TransactionMode { + return func(key string) vtgatepb.TransactionMode { + txMode := v.GetString(key) + switch strings.ToLower(txMode) { + case "single": + return vtgatepb.TransactionMode_SINGLE + case "multi": + return vtgatepb.TransactionMode_MULTI + case "twopc": + return vtgatepb.TransactionMode_TWOPC + default: + fmt.Printf("Invalid option: %v\n", txMode) + fmt.Println("Usage: -transaction_mode {SINGLE | MULTI | TWOPC}") + os.Exit(1) + return -1 + } + } + }, + }, + ) + // schema tracking flags enableSchemaChangeSignal = true enableViews bool @@ -138,7 +165,7 @@ var ( ) func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&transactionMode, "transaction_mode", transactionMode, "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") + fs.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") fs.BoolVar(&normalizeQueries, "normalize_queries", normalizeQueries, "Rewrite queries with bind vars. 
Turn this off if the app itself sends normalized queries with bind vars.") fs.BoolVar(&terseErrors, "vtgate-config-terse-errors", terseErrors, "prevent bind vars from escaping in returned errors") fs.IntVar(&truncateErrorLen, "truncate-error-len", truncateErrorLen, "truncate errors sent to client if they are longer than this value (0 means do not truncate)") @@ -173,7 +200,11 @@ func registerFlags(fs *pflag.FlagSet) { fs.IntVar(&warmingReadsConcurrency, "warming-reads-concurrency", 500, "Number of concurrent warming reads allowed") fs.DurationVar(&warmingReadsQueryTimeout, "warming-reads-query-timeout", 5*time.Second, "Timeout of warming read queries") - viperutil.BindFlags(fs, enableOnlineDDL, enableDirectDDL) + viperutil.BindFlags(fs, + enableOnlineDDL, + enableDirectDDL, + transactionMode, + ) } func init() { @@ -181,25 +212,6 @@ func init() { servenv.OnParseFor("vtcombo", registerFlags) } -func getTxMode() vtgatepb.TransactionMode { - switch strings.ToLower(transactionMode) { - case "single": - log.Infof("Transaction mode: '%s'", transactionMode) - return vtgatepb.TransactionMode_SINGLE - case "multi": - log.Infof("Transaction mode: '%s'", transactionMode) - return vtgatepb.TransactionMode_MULTI - case "twopc": - log.Infof("Transaction mode: '%s'", transactionMode) - return vtgatepb.TransactionMode_TWOPC - default: - fmt.Printf("Invalid option: %v\n", transactionMode) - fmt.Println("Usage: -transaction_mode {SINGLE | MULTI | TWOPC}") - os.Exit(1) - return -1 - } -} - var ( // vschemaCounters needs to be initialized before planner to // catch the initial load stats. @@ -287,6 +299,8 @@ func Init( log.Fatalf("tabletGateway.WaitForTablets failed: %v", err) } + dynamicConfig := NewDynamicViperConfig() + // If we want to filter keyspaces replace the srvtopo.Server with a // filtering server if discovery.FilteringKeyspaces() { @@ -301,7 +315,7 @@ func Init( if _, err := schema.ParseDDLStrategy(defaultDDLStrategy); err != nil { log.Fatalf("Invalid value for -ddl_strategy: %v", err.Error()) } - tc := NewTxConn(gw, getTxMode()) + tc := NewTxConn(gw, dynamicConfig) // ScatterConn depends on TxConn to perform forced rollbacks. sc := NewScatterConn("VttabletCall", tc, gw) // TxResolver depends on TxConn to complete distributed transaction. 
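The vtgate.go hunks above replace the startup-only getTxMode() parse with a viper-backed transactionMode value: vtgate now re-reads transaction_mode at the point of use, and TxConn receives the dynamic config rather than a mode frozen at Init. A minimal standalone sketch of this read-at-use pattern; the types here are simplified stand-ins, not the actual viperutil API:

package main

import (
	"fmt"
	"strings"
	"sync/atomic"
)

// dynamicValue plays the role of a viperutil.Value: a writer publishes a new
// setting and every subsequent reader observes it.
type dynamicValue struct{ v atomic.Value }

func (d *dynamicValue) Set(s string) { d.v.Store(strings.ToLower(s)) }
func (d *dynamicValue) Get() string  { return d.v.Load().(string) }

// txConn consults the config at transaction start, mirroring how TxConn now
// asks the dynamic config for the mode instead of caching getTxMode() once.
type txConn struct{ mode *dynamicValue }

func (tc *txConn) begin() string { return tc.mode.Get() }

func main() {
	mode := &dynamicValue{}
	mode.Set("MULTI")
	tc := &txConn{mode: mode}
	fmt.Println(tc.begin()) // multi

	mode.Set("TWOPC") // e.g. a dynamic config reload
	fmt.Println(tc.begin()) // twopc, picked up without a restart
}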
@@ -352,6 +366,7 @@ func Init( noScatter, pv, warmingReadsPercent, + dynamicConfig, ) if err := executor.defaultQueryLogger(); err != nil { diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go index f997dc6ac0a..21375fb8eb3 100644 --- a/go/vt/vtorc/db/generate_base.go +++ b/go/vt/vtorc/db/generate_base.go @@ -69,10 +69,8 @@ CREATE TABLE database_instance ( last_sql_error TEXT not null default '', last_io_error TEXT not null default '', oracle_gtid TINYint not null default 0, - mariadb_gtid TINYint not null default 0, relay_log_file varchar(128) not null default '', relay_log_pos bigint not null default 0, - pseudo_gtid TINYint not null default 0, replication_depth TINYint not null default 0, has_replication_filters TINYint not null default 0, data_center varchar(32) not null default '', diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go index 3e9e81c5c9f..fa2e1a4ec95 100644 --- a/go/vt/vtorc/inst/analysis.go +++ b/go/vt/vtorc/inst/analysis.go @@ -108,7 +108,6 @@ type ReplicationAnalysis struct { Description string StructureAnalysis []StructureAnalysisCode OracleGTIDImmediateTopology bool - MariaDBGTIDImmediateTopology bool BinlogServerImmediateTopology bool SemiSyncPrimaryEnabled bool SemiSyncPrimaryStatus bool diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index 07830bf7dda..fc91c28b021 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -30,7 +30,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/util" @@ -54,7 +54,7 @@ type clusterAnalysis struct { hasClusterwideAction bool totalTablets int primaryAlias string - durability reparentutil.Durabler + durability policy.Durabler } // GetReplicationAnalysis will check for replication problems (dead primary; unreachable primary; etc) @@ -183,17 +183,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna ), 0 ) AS count_valid_semi_sync_replicas, - MIN( - primary_instance.mariadb_gtid - ) AS is_mariadb_gtid, - SUM(replica_instance.mariadb_gtid) AS count_mariadb_gtid_replicas, - IFNULL( - SUM( - replica_instance.last_checked <= replica_instance.last_seen - AND replica_instance.mariadb_gtid != 0 - ), - 0 - ) AS count_valid_mariadb_gtid_replicas, IFNULL( SUM( replica_instance.log_bin @@ -339,8 +328,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 - countValidMariaDBGTIDReplicas := m.GetUint("count_valid_mariadb_gtid_replicas") - a.MariaDBGTIDImmediateTopology = countValidMariaDBGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 countValidBinlogServerReplicas := m.GetUint("count_valid_binlog_server_replicas") a.BinlogServerImmediateTopology = countValidBinlogServerReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 a.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_primary_enabled") @@ -388,7 +375,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna log.Errorf("ignoring keyspace %v because no durability_policy is set. 
Please set it using SetKeyspaceDurabilityPolicy", a.AnalyzedKeyspace) return nil } - durability, err := reparentutil.GetDurabilityPolicy(durabilityPolicy) + durability, err := policy.GetDurabilityPolicy(durabilityPolicy) if err != nil { log.Errorf("can't get the durability policy %v - %v. Skipping keyspace - %v.", durabilityPolicy, err, a.AnalyzedKeyspace) return nil @@ -443,11 +430,11 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = PrimaryIsReadOnly a.Description = "Primary is read-only" // - } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && policy.SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustBeSet a.Description = "Primary semi-sync must be set" // - } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && policy.SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustNotBeSet a.Description = "Primary semi-sync must not be set" // @@ -485,11 +472,11 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = ReplicationStopped a.Description = "Replication is stopped" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && policy.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustBeSet a.Description = "Replica semi-sync must be set" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !policy.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustNotBeSet a.Description = "Replica semi-sync must not be set" // @@ -541,7 +528,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna } if a.IsPrimary && a.CountReplicas > 1 && !a.OracleGTIDImmediateTopology && - !a.MariaDBGTIDImmediateTopology && !a.BinlogServerImmediateTopology { a.StructureAnalysis = append(a.StructureAnalysis, NoFailoverSupportStructureWarning) } diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index c061d54ebb3..ae4f7279403 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/test" ) @@ -33,10 +34,10 @@ var ( // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. 
initialSQL = []string{ - `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, - `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, - `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, - `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,'vt-0000000112-relay-bin.000002',15815,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 
07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,'vt-0000000100-relay-bin.000002',15815,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,0,0,'',0,'',0,NULL,NULL,0,'','',0,'',0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, + `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,'vt-0000000200-relay-bin.000002',15815,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, `INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 
00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, @@ -70,7 +71,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -89,7 +90,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, ShardPrimaryTermTimestamp: "2022-12-28 07:23:25.129898+00:00", - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -107,7 +108,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 4, CountValidReplicas: 4, @@ -129,7 +130,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 0, IsPrimary: 1, @@ -149,7 +150,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 3, IsPrimary: 1, @@ -169,7 +170,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 0, CountReplicas: 4, CountValidReplicas: 2, @@ -191,7 +192,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -212,7 +213,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -234,7 +235,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -256,7 +257,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -278,7 +279,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -315,7 +316,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, 
CountValidReplicas: 4, @@ -333,7 +334,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -355,7 +356,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -373,7 +374,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 102}, }, @@ -395,7 +396,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -413,7 +414,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -436,7 +437,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -454,7 +455,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -477,7 +478,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -495,7 +496,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, @@ -520,7 +521,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -542,7 +543,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 0, @@ -562,7 +563,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -583,7 +584,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, - DurabilityPolicy: "none", + 
DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 1, @@ -605,7 +606,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { }, // Snapshot Keyspace KeyspaceType: 1, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, }}, keyspaceWanted: "ks", @@ -643,7 +644,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -663,7 +664,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, IsInvalid: 1, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -680,7 +681,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, IsInvalid: 1, }, { TabletInfo: &topodatapb.Tablet{ @@ -722,7 +723,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, IsInvalid: 1, }}, keyspaceWanted: "ks", @@ -740,7 +741,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -758,7 +759,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, ErrantGTID: "some errant GTID", PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, @@ -781,7 +782,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6708, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, LastCheckValid: 1, CountReplicas: 4, CountValidReplicas: 4, @@ -799,7 +800,7 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, ErrantGTID: "some errant GTID", PrimaryTabletInfo: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go index 36f47b7ab0b..fef1e90acce 100644 --- a/go/vt/vtorc/inst/instance.go +++ b/go/vt/vtorc/inst/instance.go @@ -56,8 +56,6 @@ type Instance struct { GTIDMode string SupportsOracleGTID bool UsingOracleGTID bool - UsingMariaDBGTID bool - UsingPseudoGTID bool // Legacy. 
Always 'false' ReadBinlogCoordinates BinlogCoordinates ExecBinlogCoordinates BinlogCoordinates IsDetached bool @@ -134,11 +132,6 @@ func (instance *Instance) MajorVersionString() string { return strings.Join(instance.MajorVersion(), ".") } -// IsMariaDB checks whether this is any version of MariaDB -func (instance *Instance) IsMariaDB() bool { - return strings.Contains(instance.Version, "MariaDB") -} - // IsPercona checks whether this is any version of Percona Server func (instance *Instance) IsPercona() bool { return strings.Contains(instance.VersionComment, "Percona") @@ -151,9 +144,6 @@ func (instance *Instance) IsBinlogServer() bool { // IsOracleMySQL checks whether this is an Oracle MySQL distribution func (instance *Instance) IsOracleMySQL() bool { - if instance.IsMariaDB() { - return false - } if instance.IsPercona() { return false } @@ -170,8 +160,6 @@ func (instance *Instance) applyFlavorName() { } if instance.IsOracleMySQL() { instance.FlavorName = "MySQL" - } else if instance.IsMariaDB() { - instance.FlavorName = "MariaDB" } else if instance.IsPercona() { instance.FlavorName = "Percona" } else { @@ -220,7 +208,7 @@ func (instance *Instance) SQLThreadUpToDate() bool { return instance.ReadBinlogCoordinates.Equals(&instance.ExecBinlogCoordinates) } -// UsingGTID returns true when this replica is currently replicating via GTID (either Oracle or MariaDB) +// UsingGTID returns true when this replica is currently replicating via GTID func (instance *Instance) UsingGTID() bool { - return instance.UsingOracleGTID || instance.UsingMariaDBGTID + return instance.UsingOracleGTID } diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go index d1421dbc91d..66aef7c8a78 100644 --- a/go/vt/vtorc/inst/instance_dao.go +++ b/go/vt/vtorc/inst/instance_dao.go @@ -291,7 +291,6 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.SQLDelay = fs.ReplicationStatus.SqlDelay instance.UsingOracleGTID = fs.ReplicationStatus.AutoPosition - instance.UsingMariaDBGTID = fs.ReplicationStatus.UsingGtid instance.SourceUUID = fs.ReplicationStatus.SourceUuid instance.HasReplicationFilters = fs.ReplicationStatus.HasReplicationFilters @@ -548,7 +547,6 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.GTIDMode = m.GetString("gtid_mode") instance.GtidPurged = m.GetString("gtid_purged") instance.GtidErrant = m.GetString("gtid_errant") - instance.UsingMariaDBGTID = m.GetBool("mariadb_gtid") instance.SelfBinlogCoordinates.LogFile = m.GetString("binary_log_file") instance.SelfBinlogCoordinates.LogPos = m.GetUint32("binary_log_pos") instance.ReadBinlogCoordinates.LogFile = m.GetString("source_log_file") @@ -849,8 +847,6 @@ func mkInsertForInstances(instances []*Instance, instanceWasActuallyFound bool, "gtid_mode", "gtid_purged", "gtid_errant", - "mariadb_gtid", - "pseudo_gtid", "source_log_file", "read_source_log_pos", "relay_source_log_file", @@ -930,8 +926,6 @@ func mkInsertForInstances(instances []*Instance, instanceWasActuallyFound bool, args = append(args, instance.GTIDMode) args = append(args, instance.GtidPurged) args = append(args, instance.GtidErrant) - args = append(args, instance.UsingMariaDBGTID) - args = append(args, instance.UsingPseudoGTID) args = append(args, instance.ReadBinlogCoordinates.LogFile) args = append(args, instance.ReadBinlogCoordinates.LogPos) args = append(args, instance.ExecBinlogCoordinates.LogFile) diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go index cc3217442ed..1a14041450c 
100644 --- a/go/vt/vtorc/inst/instance_dao_test.go +++ b/go/vt/vtorc/inst/instance_dao_test.go @@ -63,14 +63,14 @@ func TestMkInsertSingle(t *testing.T) { (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_net_timeout, heartbeat_interval, - replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) VALUES - (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')) + (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')) ` a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, - false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,` + false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,` sql1, args1, err := mkInsertForInstances(instances[:1], false, true) require.NoError(t, err) @@ -86,17 +86,17 @@ func TestMkInsertThree(t *testing.T) { (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_net_timeout, heartbeat_interval, - replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, 
executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) VALUES - (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')), - (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')), - (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')) + (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')), + (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')), + (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')) ` a3 := ` - zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, - zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, - zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 
0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, ` sql3, args3, err := mkInsertForInstances(instances[:3], true, true) diff --git a/go/vt/vtorc/inst/keyspace_dao.go b/go/vt/vtorc/inst/keyspace_dao.go index d764e3fc56a..4271886121e 100644 --- a/go/vt/vtorc/inst/keyspace_dao.go +++ b/go/vt/vtorc/inst/keyspace_dao.go @@ -22,7 +22,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/db" ) @@ -80,10 +80,10 @@ func SaveKeyspace(keyspace *topo.KeyspaceInfo) error { } // GetDurabilityPolicy gets the durability policy for the given keyspace. -func GetDurabilityPolicy(keyspace string) (reparentutil.Durabler, error) { +func GetDurabilityPolicy(keyspace string) (policy.Durabler, error) { ki, err := ReadKeyspace(keyspace) if err != nil { return nil, err } - return reparentutil.GetDurabilityPolicy(ki.DurabilityPolicy) + return policy.GetDurabilityPolicy(ki.DurabilityPolicy) } diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go index dda3ffaa9d2..ef2dd67379e 100644 --- a/go/vt/vtorc/inst/keyspace_dao_test.go +++ b/go/vt/vtorc/inst/keyspace_dao_test.go @@ -24,7 +24,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/db" ) @@ -48,7 +48,7 @@ func TestSaveAndReadKeyspace(t *testing.T) { keyspaceName: "ks1", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, }, keyspaceWanted: nil, semiSyncAckersWanted: 1, @@ -72,12 +72,12 @@ func TestSaveAndReadKeyspace(t *testing.T) { keyspaceName: "ks4", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, BaseKeyspace: "baseKeyspace", }, keyspaceWanted: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, }, semiSyncAckersWanted: 0, }, { @@ -120,7 +120,7 @@ func TestSaveAndReadKeyspace(t *testing.T) { return } require.NoError(t, err) - require.EqualValues(t, tt.semiSyncAckersWanted, reparentutil.SemiSyncAckers(durabilityPolicy, nil)) + require.EqualValues(t, tt.semiSyncAckersWanted, policy.SemiSyncAckers(durabilityPolicy, nil)) }) } } diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go index 5cbe139728b..8218af45db6 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vtorc/db" 
"vitess.io/vitess/go/vt/vtorc/inst" @@ -36,15 +37,15 @@ import ( var ( keyspaceDurabilityNone = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, } keyspaceDurabilitySemiSync = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, } keyspaceDurabilityTest = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "test", + DurabilityPolicy: policy.DurabilityTest, } keyspaceSnapshot = &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, @@ -106,7 +107,7 @@ func TestRefreshAllKeyspaces(t *testing.T) { // Set clusters to watch to watch all keyspaces clustersToWatch = nil // Change the durability policy of ks1 - reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", "semi_sync") + reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", policy.DurabilitySemiSync) require.NoError(t, RefreshAllKeyspacesAndShards(context.Background())) // Verify that all the keyspaces are correctly reloaded @@ -144,7 +145,7 @@ func TestRefreshKeyspace(t *testing.T) { keyspaceName: "ks1", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "semi_sync", + DurabilityPolicy: policy.DurabilitySemiSync, }, keyspaceWanted: nil, err: "", @@ -169,12 +170,12 @@ func TestRefreshKeyspace(t *testing.T) { keyspaceName: "ks4", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, BaseKeyspace: "baseKeyspace", }, keyspaceWanted: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, - DurabilityPolicy: "none", + DurabilityPolicy: policy.DurabilityNone, }, err: "", }, { diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go index f14eca624c9..0d0bbff5b53 100644 --- a/go/vt/vtorc/logic/topology_recovery.go +++ b/go/vt/vtorc/logic/topology_recovery.go @@ -29,6 +29,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vtorc/util" @@ -739,7 +740,7 @@ func fixPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (r return false, topologyRecovery, err } - if err := tabletUndoDemotePrimary(ctx, analyzedTablet, reparentutil.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil { + if err := tabletUndoDemotePrimary(ctx, analyzedTablet, policy.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil { return true, topologyRecovery, err } return true, topologyRecovery, nil @@ -782,7 +783,7 @@ func fixReplica(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (r return true, topologyRecovery, err } - err = setReplicationSource(ctx, analyzedTablet, primaryTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet), float64(analysisEntry.ReplicaNetTimeout)/2) + err = setReplicationSource(ctx, analyzedTablet, primaryTablet, policy.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet), float64(analysisEntry.ReplicaNetTimeout)/2) return true, topologyRecovery, err } @@ -817,6 +818,6 @@ func recoverErrantGTIDDetected(ctx context.Context, analysisEntry *inst.Replicat return false, topologyRecovery, 
err } - err = changeTabletType(ctx, analyzedTablet, topodatapb.TabletType_DRAINED, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) + err = changeTabletType(ctx, analyzedTablet, topodatapb.TabletType_DRAINED, policy.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) return true, topologyRecovery, err } diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go index 2a95d3b2b0e..218a679bdb0 100644 --- a/go/vt/vtorc/test/recovery_analysis.go +++ b/go/vt/vtorc/test/recovery_analysis.go @@ -62,7 +62,6 @@ type InfoForRecoveryAnalysis struct { DowntimeEndTimestamp string DowntimeRemainingSeconds int CountValidOracleGTIDReplicas uint - CountValidMariaDBGTIDReplicas uint CountValidBinlogServerReplicas uint SemiSyncPrimaryEnabled int SemiSyncPrimaryStatus int @@ -94,7 +93,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["count_downtimed_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountDowntimedReplicas), Valid: true} rowMap["count_lagging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountLaggingReplicas), Valid: true} rowMap["count_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountLoggingReplicas), Valid: true} - rowMap["count_mariadb_gtid_replicas"] = sqlutils.CellData{Valid: false} rowMap["count_mixed_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountMixedBasedLoggingReplicas), Valid: true} rowMap["count_oracle_gtid_replicas"] = sqlutils.CellData{Valid: false} rowMap["count_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountReplicas), Valid: true} @@ -102,7 +100,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["count_semi_sync_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountSemiSyncReplicasEnabled), Valid: true} rowMap["count_statement_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountStatementBasedLoggingReplicas), Valid: true} rowMap["count_valid_binlog_server_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidBinlogServerReplicas), Valid: true} - rowMap["count_valid_mariadb_gtid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidMariaDBGTIDReplicas), Valid: true} rowMap["count_valid_oracle_gtid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidOracleGTIDReplicas), Valid: true} rowMap["count_valid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidReplicas), Valid: true} rowMap["count_valid_replicating_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidReplicatingReplicas), Valid: true} diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index 2d62b017433..e63cd028d05 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -702,7 +702,8 @@ func (f *FakeQueryService) StreamHealth(ctx context.Context, callback func(*quer // VStream is part of the queryservice.QueryService interface func (f *FakeQueryService) VStream(ctx context.Context, request *binlogdatapb.VStreamRequest, send func([]*binlogdatapb.VEvent) error) error { - panic("not implemented") + // This is called as part of vreplication unit tests, so we don't panic here. + return fmt.Errorf("VStream not implemented") } // VStreamRows is part of the QueryService interface. 
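Reviewer note: the vtorc hunks above (and several hunks below in rpc_backup.go, tm_init.go, and the wrangler) mechanically move the durability helpers from `reparentutil` into the new `reparentutil/policy` subpackage, and replace raw policy strings like "semi_sync" with the `policy.Durability*` constants. A minimal sketch of the resulting call pattern, using only identifiers visible in this patch; the wrapper function itself is illustrative, not part of the change:

```go
// Illustrative sketch, not part of the patch: resolve a named durability
// policy and report how many semi-sync ackers a primary needs under it.
package example

import (
	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	"vitess.io/vitess/go/vt/vtctl/reparentutil/policy"
)

func requiredSemiSyncAckers(primary *topodatapb.Tablet) (int, error) {
	// policy.GetDurabilityPolicy replaces reparentutil.GetDurabilityPolicy;
	// policy.DurabilitySemiSync replaces the raw "semi_sync" string.
	durability, err := policy.GetDurabilityPolicy(policy.DurabilitySemiSync)
	if err != nil {
		return 0, err
	}
	// policy.SemiSyncAckers replaces reparentutil.SemiSyncAckers.
	return policy.SemiSyncAckers(durability, primary), nil
}
```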
diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index 22fe72716dd..9f906317edf 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -22,7 +22,7 @@ import ( "time" "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" @@ -136,12 +136,12 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req l.Errorf("Failed to get durability policy, error: %v", err) return } - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { l.Errorf("Failed to get durability with name %v, error: %v", durabilityName, err) } - isSemiSync := reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tabletInfo.Tablet) + isSemiSync := policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tabletInfo.Tablet) semiSyncAction, err := tm.convertBoolToSemiSyncAction(bgCtx, isSemiSync) if err != nil { l.Errorf("Failed to convert bool to semisync action, error: %v", err) diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index 3f8bc85ac7f..762b384a5f6 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -27,6 +27,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/constants/sidecar" @@ -1783,6 +1785,14 @@ func addInvariants(dbClient *binlogplayer.MockDBClient, vreplID, sourceTabletUID )) dbClient.AddInvariant(fmt.Sprintf(updatePickedSourceTablet, cell, sourceTabletUID, vreplID), &sqltypes.Result{}) dbClient.AddInvariant("update _vt.vreplication set state='Running', message='' where id=1", &sqltypes.Result{}) + dbClient.AddInvariant(vreplication.SqlMaxAllowedPacket, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "max_allowed_packet", + "int64", + ), + "65536", + )) + dbClient.AddInvariant("update _vt.vreplication set message", &sqltypes.Result{}) } func addMaterializeSettingsTablesToSchema(ms *vtctldatapb.MaterializeSettings, tenv *testEnv, venv *vtenv.Environment) { diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index 84150c82be8..fbef04de357 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -70,7 +70,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" @@ -1011,7 +1011,7 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t return "", vterrors.Wrapf(err, "cannot read keyspace durability policy %v", tablet.Keyspace) } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return "", vterrors.Wrapf(err, "cannot get durability policy %v", durabilityName) } @@ -1020,7 +1020,7 @@ func (tm *TabletManager) initializeReplication(ctx 
context.Context, tabletType t tablet.Type = tabletType - semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet)) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, policy.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet)) if err != nil { return "", err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 98e36119622..31ab895934c 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -125,7 +125,8 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map settings.StopPos = pausePos saveStop = false } - + log.Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, filter: %+v", + vr.id, settings.StartPos, settings.StopPos, vr.source.Filter) queryFunc := func(ctx context.Context, sql string) (*sqltypes.Result, error) { return vr.dbClient.ExecuteWithRetry(ctx, sql) } @@ -142,7 +143,7 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map maxAllowedPacket := int64(vr.workflowConfig.RelayLogMaxSize) // We explicitly do NOT want to batch this, we want to send it down the wire // immediately so we use ExecuteFetch directly. - res, err := vr.dbClient.ExecuteFetch("select @@session.max_allowed_packet as max_allowed_packet", 1) + res, err := vr.dbClient.ExecuteFetch(SqlMaxAllowedPacket, 1) if err != nil { log.Errorf("Error getting max_allowed_packet, will use the relay_log_max_size value of %d bytes: %v", vr.workflowConfig.RelayLogMaxSize, err) } else { diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 42701288a44..76177b56b5b 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -89,6 +89,7 @@ const ( json_unquote(json_extract(action, '$.type'))=%a and vrepl_id=%a and table_name=%a` sqlDeletePostCopyAction = `delete from _vt.post_copy_action where vrepl_id=%a and table_name=%a and id=%a` + SqlMaxAllowedPacket = "select @@session.max_allowed_packet as max_allowed_packet" ) // vreplicator provides the core logic to start vreplication streams diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index 14fcc6d0f2e..141d8257062 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -69,6 +69,7 @@ func NewPool(env tabletenv.Env, name string, cfg tabletenv.ConnPoolConfig) *Pool config := smartconnpool.Config[*Conn]{ Capacity: int64(cfg.Size), IdleTimeout: cfg.IdleTimeout, + MaxIdleCount: int64(cfg.MaxIdleCount), MaxLifetime: cfg.MaxLifetime, RefreshInterval: mysqlctl.PoolDynamicHostnameResolution, } diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go index 8cf27cbb327..c305c61b7b4 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool_test.go +++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go @@ -55,10 +55,10 @@ func TestConnPoolTimeout(t *testing.T) { defer db.Close() cfg := tabletenv.ConnPoolConfig{ - Size: 1, + Size: 1, + Timeout: time.Second, + IdleTimeout: 10 * time.Second, } - cfg.Timeout = time.Second - cfg.IdleTimeout = 10 * time.Second connPool := NewPool(tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "PoolTest"), "TestPool", cfg) params := 
dbconfigs.New(db.ConnParams())
 	connPool.Open(params, params, params)

@@ -135,6 +135,58 @@ func TestConnPoolSetCapacity(t *testing.T) {
 	}
 }

+// TestConnPoolMaxIdleCount tests the max idle count for the pool.
+// The pool should close idle connections once the idle count exceeds the allowed idle count.
+// Changing the pool capacity also changes the idle count allowed for that pool.
+func TestConnPoolMaxIdleCount(t *testing.T) {
+	db := fakesqldb.New(t)
+	defer db.Close()
+
+	cfg := tabletenv.ConnPoolConfig{
+		Size:         5,
+		MaxIdleCount: 2,
+	}
+	connPool := NewPool(tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "PoolTest"), "TestPool", cfg)
+	params := dbconfigs.New(db.ConnParams())
+	connPool.Open(params, params, params)
+	defer connPool.Close()
+
+	assert.EqualValues(t, 5, connPool.Capacity(), "pool capacity should be 5")
+	assert.EqualValues(t, 2, connPool.IdleCount(), "pool idle count should be 2")
+
+	var conns []*PooledConn
+	for i := 0; i < 3; i++ {
+		conn, err := connPool.Get(context.Background(), nil)
+		require.NoError(t, err)
+		conns = append(conns, conn)
+	}
+
+	// after recycle - 1 idle connection
+	conns[0].Recycle()
+	assert.Zero(t, connPool.Metrics.IdleClosed(), "pool idle closed should be 0")
+
+	// after recycle - 2 idle connections
+	conns[1].Recycle()
+	assert.Zero(t, connPool.Metrics.IdleClosed(), "pool idle closed should be 0")
+
+	// after recycle - 3 idle connections, 1 will be closed
+	conns[2].Recycle()
+	assert.EqualValues(t, 1, connPool.Metrics.IdleClosed(), "pool idle closed should be 1")
+
+	// Changing the pool capacity also changes the idle count allowed for that pool:
+	// setting the capacity lower than the max idle count lowers the effective idle count.
+	err := connPool.SetCapacity(context.Background(), 4)
+	require.NoError(t, err)
+	assert.EqualValues(t, 4, connPool.Capacity(), "pool capacity should be 4")
+	assert.EqualValues(t, 2, connPool.IdleCount(), "pool idle count should be 2")
+
+	err = connPool.SetCapacity(context.Background(), 1)
+	require.NoError(t, err)
+	assert.EqualValues(t, 1, connPool.Capacity(), "pool capacity should be 1")
+	assert.EqualValues(t, 1, connPool.IdleCount(), "pool idle count should be changed to 1")
+}
+
 func TestConnPoolStatJSON(t *testing.T) {
 	db := fakesqldb.New(t)
 	defer db.Close()
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go
index 994999f2368..42cc300f92d 100644
--- a/go/vt/vttablet/tabletserver/tabletenv/config.go
+++ b/go/vt/vttablet/tabletserver/tabletenv/config.go
@@ -143,6 +143,9 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) {
 	fs.DurationVar(&currentConfig.OltpReadPool.Timeout, "queryserver-config-query-pool-timeout", defaultConfig.OltpReadPool.Timeout, "query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.")
 	fs.DurationVar(&currentConfig.OlapReadPool.Timeout, "queryserver-config-stream-pool-timeout", defaultConfig.OlapReadPool.Timeout, "query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.")
 	fs.DurationVar(&currentConfig.TxPool.Timeout, "queryserver-config-txpool-timeout", defaultConfig.TxPool.Timeout, "query server transaction pool timeout, it is how long vttablet waits if tx pool is full")
+	fs.IntVar(&currentConfig.OltpReadPool.MaxIdleCount, "queryserver-config-query-pool-max-idle-count", defaultConfig.OltpReadPool.MaxIdleCount, "query server query pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods.")
+	fs.IntVar(&currentConfig.OlapReadPool.MaxIdleCount, "queryserver-config-stream-pool-max-idle-count", defaultConfig.OlapReadPool.MaxIdleCount, "query server stream pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods.")
+	fs.IntVar(&currentConfig.TxPool.MaxIdleCount, "queryserver-config-txpool-max-idle-count", defaultConfig.TxPool.MaxIdleCount, "query server transaction pool - maximum number of idle connections to retain in the pool. Use this to balance between faster response times during traffic bursts and resource efficiency during low-traffic periods.")
 	fs.DurationVar(&currentConfig.OltpReadPool.IdleTimeout, "queryserver-config-idle-timeout", defaultConfig.OltpReadPool.IdleTimeout, "query server idle timeout, vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.")
 	fs.DurationVar(&currentConfig.OltpReadPool.MaxLifetime, "queryserver-config-pool-conn-max-lifetime", defaultConfig.OltpReadPool.MaxLifetime, "query server connection max lifetime, vttablet manages various mysql connection pools.
This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool.") @@ -424,6 +427,7 @@ type ConnPoolConfig struct { Size int `json:"size,omitempty"` Timeout time.Duration `json:"timeoutSeconds,omitempty"` IdleTimeout time.Duration `json:"idleTimeoutSeconds,omitempty"` + MaxIdleCount int `json:"maxIdleCount,omitempty"` MaxLifetime time.Duration `json:"maxLifetimeSeconds,omitempty"` PrefillParallelism int `json:"prefillParallelism,omitempty"` } @@ -433,9 +437,10 @@ func (cfg *ConnPoolConfig) MarshalJSON() ([]byte, error) { tmp := struct { Proxy - Timeout string `json:"timeoutSeconds,omitempty"` - IdleTimeout string `json:"idleTimeoutSeconds,omitempty"` - MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` + Timeout string `json:"timeoutSeconds,omitempty"` + IdleTimeout string `json:"idleTimeoutSeconds,omitempty"` + MaxIdleCount int `json:"maxIdleCount,omitempty"` + MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` }{ Proxy: Proxy(*cfg), } @@ -460,6 +465,7 @@ func (cfg *ConnPoolConfig) UnmarshalJSON(data []byte) (err error) { Size int `json:"size,omitempty"` Timeout string `json:"timeoutSeconds,omitempty"` IdleTimeout string `json:"idleTimeoutSeconds,omitempty"` + MaxIdleCount int `json:"maxIdleCount,omitempty"` MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` PrefillParallelism int `json:"prefillParallelism,omitempty"` } @@ -490,6 +496,7 @@ func (cfg *ConnPoolConfig) UnmarshalJSON(data []byte) (err error) { } cfg.Size = tmp.Size + cfg.MaxIdleCount = tmp.MaxIdleCount cfg.PrefillParallelism = tmp.PrefillParallelism return nil diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go index d16b6276964..9ae653bafb9 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go @@ -28,13 +28,12 @@ import ( "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/yaml2" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) func TestConfigParse(t *testing.T) { @@ -49,10 +48,11 @@ func TestConfigParse(t *testing.T) { }, }, OltpReadPool: ConnPoolConfig{ - Size: 16, - Timeout: 10 * time.Second, - IdleTimeout: 20 * time.Second, - MaxLifetime: 50 * time.Second, + Size: 16, + Timeout: 10 * time.Second, + IdleTimeout: 20 * time.Second, + MaxLifetime: 50 * time.Second, + MaxIdleCount: 8, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000, @@ -113,6 +113,7 @@ txPool: {} oltpReadPool: size: 16 idleTimeoutSeconds: 20s + maxIdleCount: 8 maxLifetimeSeconds: 50s `) gotCfg := cfg diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 1a3a45cf99b..e17f56de11f 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/topotools/events" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -131,7 +132,7 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, 
newPrimaryAl return err } log.Infof("Getting a new durability policy for %v", durabilityName) - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return err } @@ -152,7 +153,7 @@ func (wr *Wrangler) TabletExternallyReparented(ctx context.Context, newPrimaryAl }() event.DispatchUpdate(ev, "starting external reparent") - if err := wr.tmc.ChangeType(ctx, tablet, topodatapb.TabletType_PRIMARY, reparentutil.SemiSyncAckers(durability, tablet) > 0); err != nil { + if err := wr.tmc.ChangeType(ctx, tablet, topodatapb.TabletType_PRIMARY, policy.SemiSyncAckers(durability, tablet) > 0); err != nil { log.Warningf("Error calling ChangeType on new primary %v: %v", topoproto.TabletAliasString(newPrimaryAlias), err) return err } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index fdc6f9a92ac..31a5a7936ad 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -141,12 +142,12 @@ func (wr *Wrangler) shouldSendSemiSyncAck(ctx context.Context, tablet *topodatap if err != nil { return false, err } - durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + durability, err := policy.GetDurabilityPolicy(durabilityName) if err != nil { return false, err } - return reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet), nil + return policy.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet), nil } func (wr *Wrangler) getShardPrimaryForTablet(ctx context.Context, tablet *topodatapb.Tablet) (*topo.TabletInfo, error) { diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 3167be5e512..984ff93095e 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -60,7 +61,7 @@ func TestEmergencyReparentShard(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.SetPrimaryPositionLocked(replication.Position{ @@ -211,7 +212,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) moreAdvancedReplica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - 
reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.Replicating = true diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 1894c6bb4eb..f160ddfa32b 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" @@ -60,7 +61,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell2", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -177,7 +178,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -312,7 +313,7 @@ func TestPlannedReparentInitialization(t *testing.T) { newPrimary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) goodReplica2 := NewFakeTablet(t, wr, "cell2", 3, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true @@ -691,7 +692,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { // Create a primary, a couple good replicas primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) goodReplica1 := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // old primary primary.FakeMysqlDaemon.ReadOnly = false diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index b199a64340a..7012822a017 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" + 
"vitess.io/vitess/go/vt/vtctl/reparentutil/policy" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -141,7 +142,7 @@ func TestReparentTablet(t *testing.T) { } primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil) replica := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // mark the primary inside the shard if _, err := ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error { @@ -197,7 +198,7 @@ func TestSetReplicationSource(t *testing.T) { require.NoError(t, err, "CreateShard failed") primary := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_PRIMARY, nil) - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") + reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", policy.DurabilitySemiSync) // mark the primary inside the shard _, err = ts.UpdateShardFields(ctx, "test_keyspace", "0", func(si *topo.ShardInfo) error { diff --git a/java/example/pom.xml b/java/example/pom.xml index fa3220f51bd..fabab27dd77 100644 --- a/java/example/pom.xml +++ b/java/example/pom.xml @@ -32,7 +32,7 @@ mysql mysql-connector-java - 8.0.28 + 8.0.33 false diff --git a/java/grpc-client/src/test/resources/ca.config b/java/grpc-client/src/test/resources/ca.config index e0955f28ccf..c5758831e06 100644 --- a/java/grpc-client/src/test/resources/ca.config +++ b/java/grpc-client/src/test/resources/ca.config @@ -2,6 +2,7 @@ default_bits = 1024 default_keyfile = keyfile.pem distinguished_name = req_distinguished_name + x509_extensions = v3_ca attributes = req_attributes prompt = no output_password = mypass @@ -15,3 +16,5 @@ emailAddress = test@email.address [ req_attributes ] challengePassword = A challenge password +[ v3_ca ] + basicConstraints = CA:TRUE diff --git a/java/pom.xml b/java/pom.xml index 6742258a6b8..e68bbd5574e 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -72,8 +72,8 @@ 4.1.110.Final 2.0.65.Final - 4.28.3 - 3.24.3 + 3.25.5 + 3.25.5 3.0.0 2.24.1 diff --git a/test/config.json b/test/config.json index da0026f0125..dfc0910f10e 100644 --- a/test/config.json +++ b/test/config.json @@ -604,7 +604,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_queries_subquery": { "File": "unused.go", @@ -613,7 +613,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_queries_union": { "File": "unused.go", @@ -622,7 +622,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_queries_insert": { "File": "unused.go", @@ -631,7 +631,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_queries_vexplain": { "File": "unused.go", @@ -640,7 +640,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": ["upgrade_downgrade_query_serving_queries"] + 
"Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_queries_reference": { "File": "unused.go", @@ -649,7 +649,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_queries_random": { "File": "unused.go", @@ -658,7 +658,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_kill": { "File": "unused.go", @@ -667,7 +667,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": ["upgrade_downgrade_query_serving_queries"] + "Tags": ["upgrade_downgrade_query_serving_queries_2"] }, "vtgate_concurrentdml": { "File": "unused.go", @@ -1310,6 +1310,15 @@ "RetryMax": 1, "Tags": [] }, + "global_routing": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestGlobalRouting", "-timeout", "30m"], + "Command": [], + "Manual": false, + "Shard": "vreplication_v2", + "RetryMax": 1, + "Tags": [] + }, "vreplication_fk": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestFKWorkflow"], @@ -1346,18 +1355,9 @@ "RetryMax": 1, "Tags": [] }, - "vreplication_vtctl_migrate": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVtctlMigrate", "-timeout", "30m"], - "Command": [], - "Manual": false, - "Shard": "vreplication_migrate", - "RetryMax": 1, - "Tags": [] - }, - "vreplication_vtctld_migrate": { + "vreplication_migrate": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVtctldMigrate", "-timeout", "30m"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMigrate", "-timeout", "30m"], "Command": [], "Manual": false, "Shard": "vreplication_migrate", diff --git a/test/local_example.sh b/test/local_example.sh index 391e75a9224..27f512a34eb 100755 --- a/test/local_example.sh +++ b/test/local_example.sh @@ -98,5 +98,11 @@ mysql --table < ../common/select_customer80-_data.sql ./306_down_shard_0.sh -./401_teardown.sh +./401_backup.sh + +./402_list_backup.sh + +./403_restore_from_backup.sh + +./501_teardown.sh diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index 8ad7c67a5b4..5439928837c 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -28,6 +28,7 @@ "react": "^17.0.2", "react-dom": "^17.0.2", "react-flow-renderer": "^10.3.17", + "react-json-tree": "^0.19.0", "react-query": "^3.5.9", "react-router-dom": "^5.3.4", "react-tiny-popover": "^6.0.5", @@ -4828,8 +4829,7 @@ "node_modules/@types/lodash": { "version": "4.17.0", "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.0.tgz", - "integrity": "sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==", - "dev": true + "integrity": "sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==" }, "node_modules/@types/lodash-es": { "version": "4.17.12", @@ -6633,6 +6633,19 @@ "node": ">=6" } }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": 
">=12.5.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -6649,6 +6662,16 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, "node_modules/colord": { "version": "2.9.3", "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", @@ -15231,6 +15254,18 @@ "node": ">=0.10.0" } }, + "node_modules/react-base16-styling": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/react-base16-styling/-/react-base16-styling-0.10.0.tgz", + "integrity": "sha512-H1k2eFB6M45OaiRru3PBXkuCcn2qNmx+gzLb4a9IPMR7tMH8oBRXU5jGbPDYG1Hz+82d88ED0vjR8BmqU3pQdg==", + "license": "MIT", + "dependencies": { + "@types/lodash": "^4.17.0", + "color": "^4.2.3", + "csstype": "^3.1.3", + "lodash-es": "^4.17.21" + } + }, "node_modules/react-dom": { "version": "17.0.2", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", @@ -15288,6 +15323,20 @@ "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==" }, + "node_modules/react-json-tree": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/react-json-tree/-/react-json-tree-0.19.0.tgz", + "integrity": "sha512-PqT1WRVcWP+RROsZPQfNEKIC1iM/ZMfY4g5jN6oDnXp5593PPRAYgoHcgYCDjflAHQMtxl8XGdlTwIBdEGUXvw==", + "license": "MIT", + "dependencies": { + "@types/lodash": "^4.17.0", + "react-base16-styling": "^0.10.0" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, "node_modules/react-query": { "version": "3.39.3", "resolved": "https://registry.npmjs.org/react-query/-/react-query-3.39.3.tgz", @@ -16161,6 +16210,21 @@ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/simple-swizzle/node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", + "license": "MIT" + }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json index 5d4d5dc787a..3d850fd1cc9 100644 --- a/web/vtadmin/package.json +++ b/web/vtadmin/package.json @@ -27,6 +27,7 @@ "react": "^17.0.2", "react-dom": "^17.0.2", "react-flow-renderer": "^10.3.17", + "react-json-tree": "^0.19.0", "react-query": "^3.5.9", "react-router-dom": "^5.3.4", "react-tiny-popover": "^6.0.5", 
diff --git a/web/vtadmin/src/components/jsonViewTree/JSONViewTree.tsx b/web/vtadmin/src/components/jsonViewTree/JSONViewTree.tsx
new file mode 100644
index 00000000000..af4e71db38a
--- /dev/null
+++ b/web/vtadmin/src/components/jsonViewTree/JSONViewTree.tsx
@@ -0,0 +1,95 @@
+/**
+ * Copyright 2025 The Vitess Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import React, { useState } from 'react';
+import { JSONTree } from 'react-json-tree';
+
+const vtAdminTheme = {
+    scheme: 'vtadmin',
+    author: 'custom',
+    base00: '#ffffff',
+    base01: '#f6f8fa',
+    base02: '#e6e8eb',
+    base03: '#8c8c8c',
+    base04: '#3c3c3c',
+    base05: '#2c2c2c',
+    base06: '#0057b8',
+    base07: '#000000',
+    base08: '#00875a',
+    base09: '#2c2c2c',
+    base0A: '#e44d26',
+    base0B: '#2c2c2c',
+    base0C: '#1a73e8',
+    base0D: '#3d5afe',
+    base0E: '#3cba54',
+    base0F: '#ff6f61',
+};
+
+interface JSONViewTreeProps {
+    data: any;
+}
+
+const JSONViewTree: React.FC<JSONViewTreeProps> = ({ data }) => {
+    const [expandAll, setExpandAll] = useState(false);
+    const [treeKey, setTreeKey] = useState(0);
+
+    const handleExpand = () => {
+        setExpandAll(true);
+        setTreeKey((prev) => prev + 1);
+    };
+
+    const handleCollapse = () => {
+        setExpandAll(false);
+        setTreeKey((prev) => prev + 1);
+    };
+
+    const getItemString = (type: string, data: any) => {
+        if (Array.isArray(data)) {
+            return `${type}[${data.length}]`;
+        }
+        return type;
+    };
+
+    if (!data) return null;
+    return (
+        <div>
+            <div>
+                <button onClick={handleExpand}>Expand All</button>
+                <button onClick={handleCollapse}>Collapse All</button>
+            </div>
+            <JSONTree
+                key={treeKey}
+                data={data}
+                theme={vtAdminTheme}
+                invertTheme={false}
+                getItemString={getItemString}
+                shouldExpandNodeInitially={() => expandAll}
+            />
+        </div>
+ ); +}; + +export default JSONViewTree; diff --git a/web/vtadmin/src/components/routes/keyspace/Keyspace.tsx b/web/vtadmin/src/components/routes/keyspace/Keyspace.tsx index 61bf52bb189..068f9cea2cd 100644 --- a/web/vtadmin/src/components/routes/keyspace/Keyspace.tsx +++ b/web/vtadmin/src/components/routes/keyspace/Keyspace.tsx @@ -19,7 +19,6 @@ import { Link, Redirect, Route } from 'react-router-dom'; import { useKeyspace } from '../../../hooks/api'; import { useDocumentTitle } from '../../../hooks/useDocumentTitle'; import { isReadOnlyMode } from '../../../util/env'; -import { Code } from '../../Code'; import { ContentContainer } from '../../layout/ContentContainer'; import { NavCrumbs } from '../../layout/NavCrumbs'; import { WorkspaceHeader } from '../../layout/WorkspaceHeader'; @@ -32,6 +31,8 @@ import { Advanced } from './Advanced'; import style from './Keyspace.module.scss'; import { KeyspaceShards } from './KeyspaceShards'; import { KeyspaceVSchema } from './KeyspaceVSchema'; +import JSONViewTree from '../../jsonViewTree/JSONViewTree'; +import { Code } from '../../Code'; interface RouteParams { clusterID: string; @@ -94,6 +95,7 @@ export const Keyspace = () => { + @@ -114,6 +116,11 @@ export const Keyspace = () => { + + + + + {!isReadOnlyMode() && ( diff --git a/web/vtadmin/src/components/routes/shard/Shard.tsx b/web/vtadmin/src/components/routes/shard/Shard.tsx index 686dc089d8f..8f713be9b13 100644 --- a/web/vtadmin/src/components/routes/shard/Shard.tsx +++ b/web/vtadmin/src/components/routes/shard/Shard.tsx @@ -24,12 +24,13 @@ import { WorkspaceTitle } from '../../layout/WorkspaceTitle'; import { ContentContainer } from '../../layout/ContentContainer'; import { Tab } from '../../tabs/Tab'; import { TabContainer } from '../../tabs/TabContainer'; -import { Code } from '../../Code'; import { useDocumentTitle } from '../../../hooks/useDocumentTitle'; import { KeyspaceLink } from '../../links/KeyspaceLink'; import { useKeyspace } from '../../../hooks/api'; import { ShardTablets } from './ShardTablets'; import Advanced from './Advanced'; +import JSONViewTree from '../../jsonViewTree/JSONViewTree'; +import { Code } from '../../Code'; interface RouteParams { clusterID: string; @@ -114,6 +115,7 @@ export const Shard = () => { + @@ -123,6 +125,7 @@ export const Shard = () => { {shard && } + {shard && } diff --git a/web/vtadmin/src/components/routes/stream/Stream.tsx b/web/vtadmin/src/components/routes/stream/Stream.tsx index 45e9846315a..6143ac3e302 100644 --- a/web/vtadmin/src/components/routes/stream/Stream.tsx +++ b/web/vtadmin/src/components/routes/stream/Stream.tsx @@ -13,17 +13,20 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -import { Link, useParams } from 'react-router-dom'; +import { Link, Redirect, Route, Switch, useParams, useRouteMatch } from 'react-router-dom'; import { useWorkflow } from '../../../hooks/api'; import { useDocumentTitle } from '../../../hooks/useDocumentTitle'; import { formatStreamKey, getStream } from '../../../util/workflows'; -import { Code } from '../../Code'; import { ContentContainer } from '../../layout/ContentContainer'; import { NavCrumbs } from '../../layout/NavCrumbs'; import { WorkspaceHeader } from '../../layout/WorkspaceHeader'; import { WorkspaceTitle } from '../../layout/WorkspaceTitle'; import style from './Stream.module.scss'; +import JSONViewTree from '../../jsonViewTree/JSONViewTree'; +import { TabContainer } from '../../tabs/TabContainer'; +import { Tab } from '../../tabs/Tab'; +import { Code } from '../../Code'; interface RouteParams { clusterID: string; @@ -36,6 +39,7 @@ interface RouteParams { export const Stream = () => { const params = useParams(); + const { path, url } = useRouteMatch(); const { data: workflow } = useWorkflow({ clusterID: params.clusterID, keyspace: params.keyspace, @@ -72,7 +76,17 @@ export const Stream = () => { - + + + + + + + {stream && } + {stream && } + + + ); diff --git a/web/vtadmin/src/components/routes/tablet/Tablet.tsx b/web/vtadmin/src/components/routes/tablet/Tablet.tsx index b0181251fd0..f4a9ca20248 100644 --- a/web/vtadmin/src/components/routes/tablet/Tablet.tsx +++ b/web/vtadmin/src/components/routes/tablet/Tablet.tsx @@ -19,7 +19,6 @@ import { useExperimentalTabletDebugVars, useTablet } from '../../../hooks/api'; import { useDocumentTitle } from '../../../hooks/useDocumentTitle'; import { isReadOnlyMode } from '../../../util/env'; import { formatDisplayType, formatState } from '../../../util/tablets'; -import { Code } from '../../Code'; import { ContentContainer } from '../../layout/ContentContainer'; import { NavCrumbs } from '../../layout/NavCrumbs'; import { WorkspaceHeader } from '../../layout/WorkspaceHeader'; @@ -34,6 +33,8 @@ import style from './Tablet.module.scss'; import { TabletCharts } from './TabletCharts'; import { env } from '../../../util/env'; import FullStatus from './FullStatus'; +import JSONViewTree from '../../jsonViewTree/JSONViewTree'; +import { Code } from '../../Code'; interface RouteParams { alias: string; @@ -108,6 +109,7 @@ export const Tablet = () => { + @@ -128,6 +130,14 @@ export const Tablet = () => { + +
+ + + {env().VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS && } +
+
+ {tablet && } {!isReadOnlyMode() && ( diff --git a/web/vtadmin/src/components/routes/workflow/Workflow.tsx b/web/vtadmin/src/components/routes/workflow/Workflow.tsx index 939d1c6d386..7b3f0df32a5 100644 --- a/web/vtadmin/src/components/routes/workflow/Workflow.tsx +++ b/web/vtadmin/src/components/routes/workflow/Workflow.tsx @@ -30,11 +30,12 @@ import { ContentContainer } from '../../layout/ContentContainer'; import { TabContainer } from '../../tabs/TabContainer'; import { Tab } from '../../tabs/Tab'; import { getStreams } from '../../../util/workflows'; -import { Code } from '../../Code'; import { ShardLink } from '../../links/ShardLink'; import { WorkflowVDiff } from './WorkflowVDiff'; import { Select } from '../../inputs/Select'; import { formatDateTimeShort } from '../../../util/time'; +import JSONViewTree from '../../jsonViewTree/JSONViewTree'; +import { Code } from '../../Code'; interface RouteParams { clusterID: string; @@ -168,6 +169,7 @@ export const Workflow = () => { + @@ -192,6 +194,10 @@ export const Workflow = () => { + + + +
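Reviewer note: each route file above (Keyspace, Shard, Stream, Tablet, Workflow) gains a JSON-tree tab that renders the new component alongside the existing raw `Code` view; the added `<Tab>`/`<Route>` markup itself is not legible in these hunks. A hedged sketch of the wiring, with the tab label and the `json_tree` path segment as assumptions:

```tsx
// Hedged sketch: wiring JSONViewTree into a sub-route. The path segment,
// label, and props shape are assumptions; JSONViewTree and its `data`
// prop come from the new component file in this patch.
import * as React from 'react';
import { Link, Route } from 'react-router-dom';
import JSONViewTree from '../../jsonViewTree/JSONViewTree';

interface Props {
    url: string; // base URL, e.g. from useRouteMatch()
    path: string; // base path pattern, e.g. from useRouteMatch()
    subject: any; // the object to render, e.g. a workflow or tablet
}

export const JSONTreeTab: React.FC<Props> = ({ url, path, subject }) => (
    <>
        <Link to={`${url}/json_tree`}>JSON Tree</Link>
        <Route path={`${path}/json_tree`}>
            <JSONViewTree data={subject} />
        </Route>
    </>
);
```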